From 647b1a2cfb1ad25be294d4180825b74fff9451f1 Mon Sep 17 00:00:00 2001
From: Hendrik Muhs
Date: Fri, 8 Jun 2018 07:51:45 +0200
Subject: [PATCH 01/24] flush job to ensure all results have been written
 (#31187)

flush ml job to ensure all results have been written

fixes #31173
---
 .../xpack/ml/integration/ForecastIT.java      | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
index 18b1071280307..2f3ea6c83a536 100644
--- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
+++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
@@ -239,6 +239,9 @@ public void testOverflowToDisk() throws Exception {
             throw e;
         }
 
+        // flushing the job forces an index refresh, see https://github.com/elastic/elasticsearch/issues/31173
+        flushJob(job.getId(), false);
+
         List<ForecastRequestStats> forecastStats = getForecastStats();
         assertThat(forecastStats.size(), equalTo(1));
         ForecastRequestStats forecastRequestStats = forecastStats.get(0);
@@ -261,6 +264,16 @@ public void testOverflowToDisk() throws Exception {
         }
 
         closeJob(job.getId());
+
+        forecastStats = getForecastStats();
+        assertThat(forecastStats.size(), equalTo(2));
+        for (ForecastRequestStats stats : forecastStats) {
+            forecasts = getForecasts(job.getId(), stats);
+
+            assertThat(stats.getRecordCount(), equalTo(8000L));
+            assertThat(forecasts.size(), equalTo(8000));
+        }
+
     }
 
     private void createDataWithLotsOfClientIps(TimeValue bucketSpan, Job.Builder job) throws IOException {

From aeb2c3266d43dc1b424f4492e2c00652f72bcdf1 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Fri, 8 Jun 2018 08:58:46 +0200
Subject: [PATCH 02/24] Move number of language analyzers to analysis-common
 module (#31143)

The following analyzers were moved from the server module to the
analysis-common module: `snowball`, `arabic`, `armenian`, `basque`,
`bengali`, `brazilian`, `bulgarian`, `catalan`, `chinese`, `cjk`, `czech`,
`danish`, `dutch`, `english`, `finnish`, `french`, `galician` and `german`.
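To make the new wiring concrete, here is a minimal sketch of a plugin that
registers one of the moved analyzers through both `AnalysisPlugin` extension
points, mirroring the calls in the diff below. It is illustrative only: the
class name `MyAnalysisPlugin` is hypothetical, only `arabic` is shown, import
paths are best-effort, and the sketch assumes it sits in the same package as
the provider (the provider constructors become package-private with this
change):

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.ar.ArabicAnalyzer;
    import org.elasticsearch.index.analysis.AnalyzerProvider;
    import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory;
    import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
    import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
    import org.elasticsearch.plugins.AnalysisPlugin;
    import org.elasticsearch.plugins.Plugin;

    // Hypothetical plugin, for illustration only; the real registration
    // happens in CommonAnalysisPlugin in the diff below.
    public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {

        @Override
        public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
            // Index analyzers: instantiated per index, so the provider can
            // read stop words and other options from the index settings.
            Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> analyzers = new TreeMap<>();
            analyzers.put("arabic", ArabicAnalyzerProvider::new);
            return analyzers;
        }

        @Override
        public List<PreBuiltAnalyzerProviderFactory> getPreBuiltAnalyzerProviderFactories() {
            // Pre-built analyzers: created once per version and cached
            // according to the given CachingStrategy.
            return Collections.singletonList(
                new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, version -> {
                    Analyzer a = new ArabicAnalyzer();
                    a.setVersion(version.luceneVersion);
                    return a;
                }));
        }
    }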
Relates to #23658 --- .../common}/ArabicAnalyzerProvider.java | 6 +- .../common}/ArmenianAnalyzerProvider.java | 6 +- .../common}/BasqueAnalyzerProvider.java | 6 +- .../common}/BengaliAnalyzerProvider.java | 6 +- .../common}/BrazilianAnalyzerProvider.java | 6 +- .../common}/BulgarianAnalyzerProvider.java | 6 +- .../common}/CatalanAnalyzerProvider.java | 6 +- .../common}/ChineseAnalyzerProvider.java | 9 +- .../analysis/common}/CjkAnalyzerProvider.java | 6 +- .../analysis/common/CommonAnalysisPlugin.java | 140 ++++- .../common}/CzechAnalyzerProvider.java | 6 +- .../common}/DanishAnalyzerProvider.java | 6 +- .../common}/DutchAnalyzerProvider.java | 6 +- .../common}/EnglishAnalyzerProvider.java | 6 +- .../common}/FinnishAnalyzerProvider.java | 6 +- .../common}/FrenchAnalyzerProvider.java | 6 +- .../common}/GalicianAnalyzerProvider.java | 6 +- .../common}/GermanAnalyzerProvider.java | 6 +- .../analysis/common}/SnowballAnalyzer.java | 6 +- .../common}/SnowballAnalyzerProvider.java | 6 +- .../common}/SnowballAnalyzerTests.java | 10 +- .../test/analysis-common/20_analyzers.yml | 523 ++++++++++++++++++ .../test/search.query/40_query_string.yml | 58 ++ .../TokenCountFieldMapperIntegrationIT.java | 15 +- .../test/update_by_query/30_new_fields.yml | 7 +- .../test/count/20_query_string.yml | 8 - .../test/explain/30_query_string.yml | 10 - .../20_query_string.yml | 8 - .../test/search/60_query_string.yml | 8 - .../index/analysis/AnalysisRegistry.java | 2 +- .../indices/analysis/AnalysisModule.java | 36 -- .../indices/analysis/PreBuiltAnalyzers.java | 179 ------ .../index/analysis/PreBuiltAnalyzerTests.java | 19 +- .../index/mapper/TextFieldMapperTests.java | 40 +- .../query/QueryStringQueryBuilderTests.java | 4 +- .../query/SimpleQueryStringBuilderTests.java | 4 +- .../highlight/HighlighterSearchIT.java | 53 +- .../search/query/SimpleQueryStringIT.java | 61 +- .../validate/SimpleValidateQueryIT.java | 2 +- 39 files changed, 955 insertions(+), 349 deletions(-) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/ArabicAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/ArmenianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/BasqueAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/BengaliAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/BrazilianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/BulgarianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/CatalanAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/ChineseAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/CjkAnalyzerProvider.java (85%) rename 
{server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/CzechAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/DanishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/DutchAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/EnglishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/FinnishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/FrenchAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/GalicianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/GermanAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/SnowballAnalyzer.java (95%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/SnowballAnalyzerProvider.java (92%) rename {server/src/test/java/org/elasticsearch/index/analysis => modules/analysis-common/src/test/java/org/elasticsearch/analysis/common}/SnowballAnalyzerTests.java (97%) create mode 100644 modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArabicAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArabicAnalyzerProvider.java index 10d8f22bde7e8..11e452ddae8db 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArabicAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ArabicAnalyzer arabicAnalyzer; - public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); arabicAnalyzer = new ArabicAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ArabicAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArmenianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArmenianAnalyzerProvider.java index 6c5193bbb773a..1e99a56979564 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ArmenianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ArmenianAnalyzer analyzer; - public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new ArmenianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ArmenianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BasqueAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BasqueAnalyzerProvider.java index d55e3fdcba480..b28dec592309c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BasqueAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.eu.BasqueAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BasqueAnalyzer analyzer; - public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new BasqueAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BasqueAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/BengaliAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BengaliAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/BengaliAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BengaliAnalyzerProvider.java index ba11cde8fa190..8136ace4224f0 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/BengaliAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BengaliAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.bn.BengaliAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class BengaliAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BengaliAnalyzer analyzer; - public BengaliAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + BengaliAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new BengaliAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BengaliAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BrazilianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BrazilianAnalyzerProvider.java index 7ca11542ac632..05f72a6c0793f 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BrazilianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.br.BrazilianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BrazilianAnalyzer analyzer; - public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new BrazilianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BrazilianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BulgarianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BulgarianAnalyzerProvider.java index f64987d95e836..0463ddb3b0ef2 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/BulgarianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.bg.BulgarianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BulgarianAnalyzer analyzer; - public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new BulgarianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BulgarianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java index ff0f9e323097d..591a352c7215c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ca.CatalanAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CatalanAnalyzer analyzer; - public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new CatalanAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, CatalanAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java index 10e6f0dc42f1e..01b529188c6f0 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java @@ -17,12 +17,13 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; /** * Only for old indexes @@ -31,16 +32,16 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CJKAnalyzer analyzer; - public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); CharArraySet stopWords = Analysis.parseStopWords( env, indexSettings.getIndexVersionCreated(), settings, CJKAnalyzer.getDefaultStopSet()); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 433bef902c1a1..24dce7abcf370 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -24,11 +24,17 @@ import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.apache.lucene.analysis.ar.ArabicNormalizationFilter; import org.apache.lucene.analysis.ar.ArabicStemFilter; +import org.apache.lucene.analysis.bg.BulgarianAnalyzer; +import org.apache.lucene.analysis.bn.BengaliAnalyzer; import 
org.apache.lucene.analysis.bn.BengaliNormalizationFilter; +import org.apache.lucene.analysis.br.BrazilianAnalyzer; import org.apache.lucene.analysis.br.BrazilianStemFilter; +import org.apache.lucene.analysis.ca.CatalanAnalyzer; import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter; +import org.apache.lucene.analysis.cjk.CJKAnalyzer; import org.apache.lucene.analysis.cjk.CJKBigramFilter; import org.apache.lucene.analysis.cjk.CJKWidthFilter; import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter; @@ -40,14 +46,22 @@ import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.UpperCaseFilter; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.cz.CzechAnalyzer; import org.apache.lucene.analysis.cz.CzechStemFilter; +import org.apache.lucene.analysis.da.DanishAnalyzer; +import org.apache.lucene.analysis.de.GermanAnalyzer; import org.apache.lucene.analysis.de.GermanNormalizationFilter; import org.apache.lucene.analysis.de.GermanStemFilter; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; +import org.apache.lucene.analysis.eu.BasqueAnalyzer; import org.apache.lucene.analysis.fa.PersianNormalizationFilter; +import org.apache.lucene.analysis.fi.FinnishAnalyzer; import org.apache.lucene.analysis.fr.FrenchAnalyzer; +import org.apache.lucene.analysis.gl.GalicianAnalyzer; import org.apache.lucene.analysis.hi.HindiNormalizationFilter; +import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.apache.lucene.analysis.in.IndicNormalizationFilter; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; @@ -64,6 +78,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.path.PathHierarchyTokenizer; import org.apache.lucene.analysis.pattern.PatternTokenizer; import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter; @@ -73,6 +88,7 @@ import org.apache.lucene.analysis.snowball.SnowballFilter; import org.apache.lucene.analysis.standard.ClassicFilter; import org.apache.lucene.analysis.standard.ClassicTokenizer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; import org.apache.lucene.analysis.th.ThaiTokenizer; import org.apache.lucene.analysis.tr.ApostropheFilter; @@ -113,6 +129,24 @@ public Map>> getAn analyzers.put("fingerprint", FingerprintAnalyzerProvider::new); analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); analyzers.put("pattern", PatternAnalyzerProvider::new); + analyzers.put("snowball", SnowballAnalyzerProvider::new); + analyzers.put("arabic", ArabicAnalyzerProvider::new); + analyzers.put("armenian", ArmenianAnalyzerProvider::new); + analyzers.put("basque", BasqueAnalyzerProvider::new); + analyzers.put("bengali", BengaliAnalyzerProvider::new); + analyzers.put("brazilian", BrazilianAnalyzerProvider::new); + analyzers.put("bulgarian", BulgarianAnalyzerProvider::new); + analyzers.put("catalan", CatalanAnalyzerProvider::new); + analyzers.put("chinese", ChineseAnalyzerProvider::new); + analyzers.put("cjk", CjkAnalyzerProvider::new); + analyzers.put("czech", CzechAnalyzerProvider::new); + 
analyzers.put("danish", DanishAnalyzerProvider::new); + analyzers.put("dutch", DutchAnalyzerProvider::new); + analyzers.put("english", EnglishAnalyzerProvider::new); + analyzers.put("finnish", FinnishAnalyzerProvider::new); + analyzers.put("french", FrenchAnalyzerProvider::new); + analyzers.put("galician", GalicianAnalyzerProvider::new); + analyzers.put("german", GermanAnalyzerProvider::new); return analyzers; } @@ -213,10 +247,108 @@ public Map> getTokenizers() { @Override public List getPreBuiltAnalyzerProviderFactories() { List analyzers = new ArrayList<>(); - analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.LUCENE, - version -> new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET))); - analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, version -> - new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); + analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.LUCENE, version -> { + Analyzer a = new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, version -> { + Analyzer a = new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, + CharArraySet.EMPTY_SET); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE, version -> { + Analyzer a = new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, version -> { + Analyzer a = new ArabicAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("armenian", CachingStrategy.LUCENE, version -> { + Analyzer a = new ArmenianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("basque", CachingStrategy.LUCENE, version -> { + Analyzer a = new BasqueAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("bengali", CachingStrategy.LUCENE, version -> { + Analyzer a = new BengaliAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("brazilian", CachingStrategy.LUCENE, version -> { + Analyzer a = new BrazilianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, version -> { + Analyzer a = new BulgarianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, version -> { + Analyzer a = new CatalanAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.LUCENE, version -> { + // only for old indices, best effort + Analyzer a = new StandardAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, version -> { + Analyzer a = new CJKAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + 
analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, version -> { + Analyzer a = new CzechAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, version -> { + Analyzer a = new DanishAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("dutch", CachingStrategy.LUCENE, version -> { + Analyzer a = new DutchAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("english", CachingStrategy.LUCENE, version -> { + Analyzer a = new EnglishAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("finnish", CachingStrategy.LUCENE, version -> { + Analyzer a = new FinnishAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("french", CachingStrategy.LUCENE, version -> { + Analyzer a = new FrenchAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("galician", CachingStrategy.LUCENE, version -> { + Analyzer a = new GalicianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); + analyzers.add(new PreBuiltAnalyzerProviderFactory("german", CachingStrategy.LUCENE, version -> { + Analyzer a = new GermanAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + })); return analyzers; } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CzechAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CzechAnalyzerProvider.java index 27d20beef4325..9dd75fbf3b882 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CzechAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.cz.CzechAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CzechAnalyzer analyzer; - public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new CzechAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, CzechAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DanishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DanishAnalyzerProvider.java index 897997992b24c..66a789247334d 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DanishAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.da.DanishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final DanishAnalyzer analyzer; - public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new DanishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, DanishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DutchAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DutchAnalyzerProvider.java index eaa69e939cb1e..e442ff4fa9367 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DutchAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final DutchAnalyzer analyzer; - public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new DutchAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, DutchAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EnglishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EnglishAnalyzerProvider.java index 952f43296ffeb..ba30d02e20b9e 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EnglishAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final EnglishAnalyzer analyzer; - public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new EnglishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, EnglishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FinnishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FinnishAnalyzerProvider.java index b914fab66fda7..ecd4e19dfad46 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FinnishAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.fi.FinnishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final FinnishAnalyzer analyzer; - public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new FinnishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, FinnishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FrenchAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FrenchAnalyzerProvider.java index 96cdb8ed03e2c..84ec03ebe17f0 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FrenchAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.fr.FrenchAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final FrenchAnalyzer analyzer; - public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new FrenchAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, FrenchAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GalicianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GalicianAnalyzerProvider.java index 1dc6de99a4d64..58bb20c7b86b1 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GalicianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.gl.GalicianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GalicianAnalyzer analyzer; - public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new GalicianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GalicianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GermanAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GermanAnalyzerProvider.java index 52a116acca5ac..65ff5fe7a5e18 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GermanAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.de.GermanAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GermanAnalyzer analyzer; - public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new GermanAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GermanAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java similarity index 95% rename from server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java index 1a096b8fa4b9f..5dbe902fe1500 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java @@ -1,4 +1,4 @@ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; /* * Licensed to Elasticsearch under one or more contributor @@ -48,12 +48,12 @@ public final class SnowballAnalyzer extends Analyzer { private CharArraySet stopSet; /** Builds 
the named analyzer with no stop words. */ - public SnowballAnalyzer(String name) { + SnowballAnalyzer(String name) { this.name = name; } /** Builds the named analyzer with the given stop words. */ - public SnowballAnalyzer(String name, CharArraySet stopWords) { + SnowballAnalyzer(String name, CharArraySet stopWords) { this(name); stopSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stopWords)); } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java similarity index 92% rename from server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java index bd3201e3c8a54..e5584ba6b6d45 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.core.StopAnalyzer; @@ -26,6 +26,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; import java.util.HashMap; import java.util.Map; @@ -60,7 +62,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider>> setupAnalyzers(List analyzers.register("stop", StopAnalyzerProvider::new); analyzers.register("whitespace", WhitespaceAnalyzerProvider::new); analyzers.register("keyword", KeywordAnalyzerProvider::new); - analyzers.register("snowball", SnowballAnalyzerProvider::new); - analyzers.register("arabic", ArabicAnalyzerProvider::new); - analyzers.register("armenian", ArmenianAnalyzerProvider::new); - analyzers.register("basque", BasqueAnalyzerProvider::new); - analyzers.register("bengali", BengaliAnalyzerProvider::new); - analyzers.register("brazilian", BrazilianAnalyzerProvider::new); - analyzers.register("bulgarian", BulgarianAnalyzerProvider::new); - analyzers.register("catalan", CatalanAnalyzerProvider::new); - analyzers.register("chinese", ChineseAnalyzerProvider::new); - analyzers.register("cjk", CjkAnalyzerProvider::new); - analyzers.register("czech", CzechAnalyzerProvider::new); - analyzers.register("danish", DanishAnalyzerProvider::new); - analyzers.register("dutch", DutchAnalyzerProvider::new); - analyzers.register("english", EnglishAnalyzerProvider::new); - analyzers.register("finnish", FinnishAnalyzerProvider::new); - analyzers.register("french", FrenchAnalyzerProvider::new); - analyzers.register("galician", GalicianAnalyzerProvider::new); - analyzers.register("german", GermanAnalyzerProvider::new); analyzers.register("greek", GreekAnalyzerProvider::new); analyzers.register("hindi", HindiAnalyzerProvider::new); analyzers.register("hungarian", HungarianAnalyzerProvider::new); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 18cc247b84493..0e9aed3c142d9 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -20,37 +20,21 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.ar.ArabicAnalyzer; -import org.apache.lucene.analysis.bg.BulgarianAnalyzer; -import org.apache.lucene.analysis.bn.BengaliAnalyzer; -import org.apache.lucene.analysis.br.BrazilianAnalyzer; -import org.apache.lucene.analysis.ca.CatalanAnalyzer; -import org.apache.lucene.analysis.cjk.CJKAnalyzer; import org.apache.lucene.analysis.ckb.SoraniAnalyzer; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.SimpleAnalyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.analysis.cz.CzechAnalyzer; -import org.apache.lucene.analysis.da.DanishAnalyzer; -import org.apache.lucene.analysis.de.GermanAnalyzer; import org.apache.lucene.analysis.el.GreekAnalyzer; -import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.es.SpanishAnalyzer; -import org.apache.lucene.analysis.eu.BasqueAnalyzer; import org.apache.lucene.analysis.fa.PersianAnalyzer; -import org.apache.lucene.analysis.fi.FinnishAnalyzer; -import org.apache.lucene.analysis.fr.FrenchAnalyzer; import org.apache.lucene.analysis.ga.IrishAnalyzer; -import org.apache.lucene.analysis.gl.GalicianAnalyzer; import org.apache.lucene.analysis.hi.HindiAnalyzer; import org.apache.lucene.analysis.hu.HungarianAnalyzer; -import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.it.ItalianAnalyzer; import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; -import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.no.NorwegianAnalyzer; import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.apache.lucene.analysis.ro.RomanianAnalyzer; @@ -61,7 +45,6 @@ import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.elasticsearch.Version; -import org.elasticsearch.index.analysis.SnowballAnalyzer; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.util.Locale; @@ -129,168 +112,6 @@ protected Analyzer create(Version version) { } }, - SNOWBALL { - @Override - protected Analyzer create(Version version) { - Analyzer analyzer = new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET); - analyzer.setVersion(version.luceneVersion); - return analyzer; - } - }, - - ARABIC { - @Override - protected Analyzer create(Version version) { - Analyzer a = new ArabicAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - ARMENIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new ArmenianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - BASQUE { - @Override - protected Analyzer create(Version version) { - Analyzer a = new BasqueAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - BENGALI { - @Override - protected Analyzer create(Version version) { - Analyzer a = new BengaliAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - BRAZILIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new 
BrazilianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - BULGARIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new BulgarianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - CATALAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new CatalanAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - CHINESE(CachingStrategy.ONE) { - @Override - protected Analyzer create(Version version) { - Analyzer a = new StandardAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - CJK { - @Override - protected Analyzer create(Version version) { - Analyzer a = new CJKAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - CZECH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new CzechAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - DUTCH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new DutchAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - DANISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new DanishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - ENGLISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new EnglishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - FINNISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new FinnishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - FRENCH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new FrenchAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - GALICIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new GalicianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - GERMAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new GermanAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - GREEK { @Override protected Analyzer create(Version version) { diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index d0ffdbe229dd6..8c4879fd35e82 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -61,14 +61,17 @@ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { } public void testThatInstancesAreCachedAndReused() { - assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)); - // same lucene version should be cached - assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_2)); - - assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_0), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_1)); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); + // same es version should be cached + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_0), + 
PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_1)); + + // Same Lucene version should be cached: + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_1), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_2)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 9a6c264ce3688..51668ec21ad5b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -55,7 +55,6 @@ import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -87,6 +86,9 @@ public void setup() { .putList("index.analysis.filter.mySynonyms.synonyms", Collections.singletonList("car, auto")) .put("index.analysis.analyzer.synonym.tokenizer", "standard") .put("index.analysis.analyzer.synonym.filter", "mySynonyms") + // Stop filter remains in server as it is part of lucene-core + .put("index.analysis.analyzer.my_stop_analyzer.tokenizer", "standard") + .put("index.analysis.analyzer.my_stop_analyzer.filter", "stop") .build(); indexService = createIndex("test", settings); parser = indexService.mapperService().documentMapperParser(); @@ -621,7 +623,7 @@ public void testIndexPrefixIndexTypes() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .field("index_options", "offsets") .endObject().endObject().endObject().endObject()); @@ -637,7 +639,7 @@ public void testIndexPrefixIndexTypes() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .field("index_options", "freqs") .endObject().endObject().endObject().endObject()); @@ -654,7 +656,7 @@ public void testIndexPrefixIndexTypes() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .field("index_options", "positions") .endObject().endObject().endObject().endObject()); @@ -675,7 +677,7 @@ public void testIndexPrefixIndexTypes() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .field("term_vector", "with_positions_offsets") .endObject().endObject().endObject().endObject()); @@ -696,7 +698,7 @@ public void testIndexPrefixIndexTypes() throws IOException { String mapping = 
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .field("term_vector", "with_positions") .endObject().endObject().endObject().endObject()); @@ -725,7 +727,7 @@ public void testFastPhraseMapping() throws IOException { .startObject("properties") .startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "my_stop_analyzer") .field("index_phrases", true) .endObject() .startObject("synfield") @@ -742,20 +744,20 @@ public void testFastPhraseMapping() throws IOException { queryShardContext.getMapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, true); Query q = new MatchPhraseQueryBuilder("field", "two words").toQuery(queryShardContext); - assertThat(q, is(new PhraseQuery("field._index_phrase", "two word"))); + assertThat(q, is(new PhraseQuery("field._index_phrase", "two words"))); Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); - assertThat(q2, is(new PhraseQuery("field._index_phrase", "three word", "word here"))); + assertThat(q2, is(new PhraseQuery("field._index_phrase", "three words", "words here"))); Query q3 = new MatchPhraseQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext); - assertThat(q3, is(new PhraseQuery(1, "field", "two", "word"))); + assertThat(q3, is(new PhraseQuery(1, "field", "two", "words"))); Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); Query q5 = new MatchPhraseQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext); assertThat(q5, - is(new PhraseQuery.Builder().add(new Term("field", "sparkl")).add(new Term("field", "stopword"), 2).build())); + is(new PhraseQuery.Builder().add(new Term("field", "sparkle")).add(new Term("field", "stopword"), 2).build())); Query q6 = new MatchPhraseQueryBuilder("synfield", "motor car").toQuery(queryShardContext); assertThat(q6, is(new MultiPhraseQuery.Builder() @@ -778,7 +780,7 @@ public void testFastPhraseMapping() throws IOException { CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); ts.reset(); assertTrue(ts.incrementToken()); - assertEquals("some english", termAtt.toString()); + assertEquals("Some English", termAtt.toString()); } { @@ -821,7 +823,7 @@ public void testIndexPrefixMapping() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 1) .field("max_chars", 10) @@ -855,7 +857,7 @@ public void testIndexPrefixMapping() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes").endObject() .endObject().endObject() .endObject().endObject()); @@ -880,7 +882,7 @@ public void testIndexPrefixMapping() throws IOException { String illegalMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") 
.field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 1) .field("max_chars", 10) @@ -903,7 +905,7 @@ public void testIndexPrefixMapping() throws IOException { String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 11) .field("max_chars", 10) @@ -920,7 +922,7 @@ public void testIndexPrefixMapping() throws IOException { String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 0) .field("max_chars", 10) @@ -937,7 +939,7 @@ public void testIndexPrefixMapping() throws IOException { String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 1) .field("max_chars", 25) @@ -954,7 +956,7 @@ public void testIndexPrefixMapping() throws IOException { String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .field("analyzer", "english") + .field("analyzer", "standard") .field("index_prefixes", (String) null) .endObject().endObject() .endObject().endObject()); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index cbaf9e0b7e604..ae917a9499c71 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -1306,7 +1306,7 @@ public void testWithStopWords() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = new QueryStringQueryBuilder("the quick fox") .field(STRING_FIELD_NAME) - .analyzer("english") + .analyzer("stop") .toQuery(createShardContext()); BooleanQuery expected = new BooleanQuery.Builder() .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), Occur.SHOULD) @@ -1319,7 +1319,7 @@ public void testWithPrefixStopWords() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = new QueryStringQueryBuilder("the* quick fox") .field(STRING_FIELD_NAME) - .analyzer("english") + .analyzer("stop") .toQuery(createShardContext()); BooleanQuery expected = new BooleanQuery.Builder() .add(new PrefixQuery(new Term(STRING_FIELD_NAME, "the")), Occur.SHOULD) diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index ceb75f26d7711..b51e2c22a90eb 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -630,7 +630,7 @@ public void 
testWithStopWords() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = new SimpleQueryStringBuilder("the quick fox") .field(STRING_FIELD_NAME) - .analyzer("english") + .analyzer("stop") .toQuery(createShardContext()); BooleanQuery expected = new BooleanQuery.Builder() .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) @@ -643,7 +643,7 @@ public void testWithPrefixStopWords() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = new SimpleQueryStringBuilder("the* quick fox") .field(STRING_FIELD_NAME) - .analyzer("english") + .analyzer("stop") .toQuery(createShardContext()); BooleanQuery expected = new BooleanQuery.Builder() .add(new PrefixQuery(new Term(STRING_FIELD_NAME, "the")), BooleanClause.Occur.SHOULD) diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 717bab12ea5cb..35c5a19cc2e8c 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -22,6 +22,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -36,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; import org.elasticsearch.index.analysis.AnalyzerProvider; +import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -66,9 +70,11 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; +import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -113,7 +119,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockWhitespacePlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); } public void testHighlightingWithStoredKeyword() throws IOException { @@ -765,14 +771,19 @@ public void testMatchedFieldsFvhNoRequireFieldMatch() throws Exception { } private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception { + Settings.Builder settings = Settings.builder(); + settings.put(indexSettings()); + settings.put("index.analysis.analyzer.mock_english.tokenizer", "standard"); + settings.put("index.analysis.analyzer.mock_english.filter", "mock_snowball"); 
assertAcked(prepareCreate("test") + .setSettings(settings) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") .startObject("foo") .field("type", "text") .field("term_vector", "with_positions_offsets") .field("store", true) - .field("analyzer", "english") + .field("analyzer", "mock_english") .startObject("fields") .startObject("plain") .field("type", "text") @@ -785,7 +796,7 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .field("type", "text") .field("term_vector", "with_positions_offsets") .field("store", true) - .field("analyzer", "english") + .field("analyzer", "mock_english") .startObject("fields") .startObject("plain") .field("type", "text") @@ -2819,7 +2830,7 @@ public void testSynonyms() throws IOException { assertAcked(prepareCreate("test").setSettings(builder.build()) .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets,search_analyzer=synonym," + - "analyzer=english,index_options=offsets")); + "analyzer=standard,index_options=offsets")); ensureGreen(); client().prepareIndex("test", "type1", "0").setSource( @@ -2983,7 +2994,39 @@ public void testWithNormalizer() throws Exception { } } - public static class MockWhitespacePlugin extends Plugin implements AnalysisPlugin { + public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin { + + public final class MockSnowBall extends TokenFilter { + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + + /** Sole constructor. */ + MockSnowBall(TokenStream in) { + super(in); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + final char[] buffer = termAtt.buffer(); + final int length = termAtt.length(); + if (buffer[length - 1] == 's') { + termAtt.setLength(length - 1); + } + if (length > 3) { + if (buffer[length - 1] == 'g' && buffer[length - 2] == 'n' && buffer[length - 3] == 'i') { + termAtt.setLength(length- 3); + } + } + return true; + } else + return false; + } + } + + @Override + public List getPreConfiguredTokenFilters() { + return singletonList(PreConfiguredTokenFilter.singleton("mock_snowball", false, MockSnowBall::new)); + } @Override public Map>> getAnalyzers() { diff --git a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 502b10e9a43dd..3ecb34861eb06 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -19,17 +19,32 @@ package org.elasticsearch.search.query; +import org.apache.lucene.analysis.CharacterUtils; +import org.apache.lucene.analysis.MockLowerCaseFilter; +import org.apache.lucene.analysis.MockTokenizer; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.analysis.CharFilterFactory; 
+import org.elasticsearch.index.analysis.MultiTermAwareComponent; +import org.elasticsearch.index.analysis.PreConfiguredCharFilter; +import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.SimpleQueryStringFlag; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -38,14 +53,19 @@ import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.io.Reader; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; @@ -68,11 +88,15 @@ public class SimpleQueryStringIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created + return Arrays.asList(MockAnalysisPlugin.class, InternalSettingsPlugin.class); // uses index.version.created } public void testSimpleQueryString() throws ExecutionException, InterruptedException { - createIndex("test"); + Settings.Builder settings = Settings.builder(); + settings.put(indexSettings()); + settings.put("index.analysis.analyzer.mock_snowball.tokenizer", "standard"); + settings.put("index.analysis.analyzer.mock_snowball.filter", "mock_snowball"); + createIndex("test", settings.build()); indexRandom(true, false, client().prepareIndex("test", "type1", "1").setSource("body", "foo"), client().prepareIndex("test", "type1", "2").setSource("body", "bar"), @@ -104,7 +128,7 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept assertSearchHits(searchResponse, "4", "5"); searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("eggplants").analyzer("snowball")).get(); + simpleQueryStringQuery("eggplants").analyzer("mock_snowball")).get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("4")); @@ -308,7 +332,7 @@ public void testSimpleQueryStringAnalyzeWildcard() throws ExecutionException, In .startObject("properties") .startObject("location") .field("type", "text") - .field("analyzer", "german") + .field("analyzer", "standard") .endObject() .endObject() .endObject() @@ -569,4 +593,33 @@ private void assertHits(SearchHits hits, String... ids) { } assertThat(hitIds, containsInAnyOrder(ids)); } + + public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin { + + public final class MockSnowBall extends TokenFilter { + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + + /** Sole constructor. 
*/ + MockSnowBall(TokenStream in) { + super(in); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + char[] buffer = termAtt.buffer(); + if (buffer[termAtt.length() - 1] == 's') { + termAtt.setLength(termAtt.length() - 1); + } + return true; + } else + return false; + } + } + + @Override + public List getPreConfiguredTokenFilters() { + return singletonList(PreConfiguredTokenFilter.singleton("mock_snowball", false, MockSnowBall::new)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 66fdf81744410..36902b55f5688 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -87,7 +87,7 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("foo").field("type", "text").endObject() .startObject("bar").field("type", "integer").endObject() - .startObject("baz").field("type", "text").field("analyzer", "snowball").endObject() + .startObject("baz").field("type", "text").field("analyzer", "standard").endObject() .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject() .endObject().endObject().endObject()) .execute().actionGet(); From 10e2013d3a1d046b3c80b13aeba936922dc6b5ef Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 6 Jun 2018 13:39:02 +0200 Subject: [PATCH 03/24] Move RestGetSettingsAction to RestToXContentListener (#31101) --- .../admin/indices/RestGetSettingsAction.java | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index d9fa50cf9410d..6dead8060425e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetSettingsAction extends BaseRestHandler { @@ -68,15 +63,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .names(names); getSettingsRequest.local(request.paramAsBoolean("local", 
getSettingsRequest.local())); getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); - - return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(GetSettingsResponse getSettingsResponse, XContentBuilder builder) throws Exception { - getSettingsResponse.toXContent(builder, request); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestToXContentListener<>(channel)); } - }

From 67c2868e436306cc8e2640019ef6573847466507 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Wed, 6 Jun 2018 16:13:02 +0200
Subject: [PATCH 04/24] Remove RestGetAllMappingsAction (#31129)

We currently have a dedicated REST action for retrieving the mappings of all
indices and types, which internally uses the get index API. This is no longer
required, as the existing RestGetMappingAction can equally serve requests that
specify no indices and no types. This commit removes RestGetAllMappingsAction
in favour of using RestGetMappingAction for such requests as well.

--- .../elasticsearch/action/ActionModule.java | 12 +- .../indices/RestGetAllMappingsAction.java | 109 ------------------ .../admin/indices/RestGetMappingAction.java | 5 +- 3 files changed, 7 insertions(+), 119 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index fa4d751a54aed..235effdcf4492 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -206,6 +206,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.persistent.CompletionPersistentTaskAction; +import org.elasticsearch.persistent.RemovePersistentTaskAction; +import org.elasticsearch.persistent.StartPersistentTaskAction; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ActionPlugin.ActionHandler; import org.elasticsearch.rest.RestController; @@ -241,7 +245,6 @@ import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -252,7 +255,6 @@ import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import 
org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -269,6 +271,7 @@ import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; @@ -313,10 +316,6 @@ import org.elasticsearch.rest.action.search.RestSearchScrollAction; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; -import org.elasticsearch.persistent.CompletionPersistentTaskAction; -import org.elasticsearch.persistent.RemovePersistentTaskAction; -import org.elasticsearch.persistent.StartPersistentTaskAction; -import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import java.util.ArrayList; import java.util.Collections; @@ -556,7 +555,6 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestSnapshotsStatusAction(settings, restController)); registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); - registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java deleted file mode 100644 index 9892717cd779b..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * The REST handler for retrieving all mappings - */ -public class RestGetAllMappingsAction extends BaseRestHandler { - - public RestGetAllMappingsAction(final Settings settings, final RestController controller) { - super(settings); - controller.registerHandler(GET, "/_mapping", this); - controller.registerHandler(GET, "/_mappings", this); - } - - @Override - public String getName() { - return "get_all_mappings_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); - getIndexRequest.indices(Strings.EMPTY_ARRAY); - getIndexRequest.features(Feature.MAPPINGS); - getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); - getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - writeMappings(response.mappings().get(index), builder); - } - builder.endObject(); - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - private void writeMappings(final ImmutableOpenMap mappings, - final XContentBuilder builder) throws IOException { - builder.startObject("mappings"); - { - for (final ObjectObjectCursor typeEntry : mappings) { - builder.field(typeEntry.key); - builder.map(typeEntry.value.sourceAsMap()); - } - } - builder.endObject(); - } - }); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 
97bd23f1a9b71..1d4e31d01caa1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -20,8 +20,6 @@ package org.elasticsearch.rest.action.admin.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -56,12 +54,13 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetMappingAction extends BaseRestHandler { public RestGetMappingAction(final Settings settings, final RestController controller) { super(settings); + controller.registerHandler(GET, "/_mapping", this); + controller.registerHandler(GET, "/_mappings", this); controller.registerHandler(GET, "/{index}/{type}/_mapping", this); controller.registerHandler(GET, "/{index}/_mappings", this); controller.registerHandler(GET, "/{index}/_mapping", this);

From 4a280ad377d5f3322160ac22b822f28e3b6085b5 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Wed, 6 Jun 2018 23:17:45 +0200
Subject: [PATCH 05/24] Add high-level client methods that accept RequestOptions (#31069)

With #30490 we introduced a new way to provide request options when sending a
request through the high-level REST client. Previously, headers could be passed
as a trailing varargs argument on each API method; now a `RequestOptions`
argument can be provided instead, and in the future it will carry further
options that can be specified per request. This commit deprecates all of the
client methods that accept a `Header` varargs argument in favour of new methods
that accept `RequestOptions`. Some APIs skip the deprecation step entirely:
they have not been released since they were added, so we can simply move them
to the new method signature.
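For illustration, calling code migrates as sketched below. This is a minimal,
hedged example, assuming an already-built `RestHighLevelClient` named `client`;
the custom header name and value are hypothetical and only show how
`RequestOptions` can be customized:

    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
    import org.elasticsearch.client.RequestOptions;

    ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();

    // Deprecated style: headers passed as a trailing varargs argument, e.g.
    // client.cluster().putSettings(request, header1, header2);

    // New style: per-request options carried by a RequestOptions instance.
    RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
    builder.addHeader("X-Example", "value"); // hypothetical header, illustration only
    ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, builder.build());

    // When nothing needs to be customized, RequestOptions.DEFAULT is passed directly.
    client.cluster().putSettings(request, RequestOptions.DEFAULT);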
--- .../elasticsearch/client/ClusterClient.java | 36 +- .../elasticsearch/client/IndicesClient.java | 698 +++++++++++++++--- .../elasticsearch/client/IngestClient.java | 76 +- .../client/RequestConverters.java | 2 +- .../client/RestHighLevelClient.java | 455 ++++++++++-- .../elasticsearch/client/SnapshotClient.java | 93 ++- .../org/elasticsearch/client/TasksClient.java | 28 +- .../elasticsearch/client/BulkProcessorIT.java | 13 +- .../client/BulkProcessorRetryIT.java | 4 +- .../elasticsearch/client/ClusterClientIT.java | 3 + .../java/org/elasticsearch/client/CrudIT.java | 171 +++-- .../client/ESRestHighLevelClientTestCase.java | 45 +- .../elasticsearch/client/IndicesClientIT.java | 102 ++- .../elasticsearch/client/PingAndInfoIT.java | 4 +- .../org/elasticsearch/client/RankEvalIT.java | 7 +- .../client/RestHighLevelClientTests.java | 25 +- .../org/elasticsearch/client/SearchIT.java | 56 +- .../documentation/CRUDDocumentationIT.java | 102 +-- .../ClusterClientDocumentationIT.java | 16 +- .../IndicesClientDocumentationIT.java | 161 ++-- .../IngestClientDocumentationIT.java | 13 +- .../MigrationDocumentationIT.java | 17 +- .../MiscellaneousDocumentationIT.java | 5 +- .../documentation/SearchDocumentationIT.java | 73 +- .../SnapshotClientDocumentationIT.java | 19 +- .../TasksClientDocumentationIT.java | 5 +- ...rossClusterSearchUnavailableClusterIT.java | 30 +- 27 files changed, 1677 insertions(+), 582 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index f3c84db79d65f..488579785e0f7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -41,11 +41,28 @@ public final class ClusterClient { } /** - * Updates cluster wide specific settings using the Cluster Update Settings API + * Updates cluster wide specific settings using the Cluster Update Settings API. + * See Cluster Update Settings + * API on elastic.co + * @param clusterUpdateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + options, ClusterUpdateSettingsResponse::fromXContent, emptySet()); + } + + /** + * Updates cluster wide specific settings using the Cluster Update Settings API. *

* See Cluster Update Settings * API on elastic.co + * @deprecated Prefer {@link #putSettings(ClusterUpdateSettingsRequest, RequestOptions)} */ + @Deprecated public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, @@ -53,11 +70,26 @@ public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest cl } /** - * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API + * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API. + * See Cluster Update Settings + * API on elastic.co + * @param clusterUpdateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet()); + } + /** + * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API. *

* See Cluster Update Settings * API on elastic.co + * @deprecated Prefer {@link #putSettingsAsync(ClusterUpdateSettingsRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index d51a92ea00fc5..fa7eb9ab9ec8a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import org.apache.http.Header; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; @@ -47,10 +46,10 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; @@ -76,66 +75,159 @@ public final class IndicesClient { } /** - * Deletes an index using the Delete Index API + * Deletes an index using the Delete Index API. + * See + * Delete Index API on elastic.co + * @param deleteIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + DeleteIndexResponse::fromXContent, emptySet()); + } + + /** + * Deletes an index using the Delete Index API. *

* See * Delete Index API on elastic.co + * @deprecated Prefer {@link #delete(DeleteIndexRequest, RequestOptions)} */ + @Deprecated public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, DeleteIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously deletes an index using the Delete Index API + * Asynchronously deletes an index using the Delete Index API. + * See + * Delete Index API on elastic.co + * @param deleteIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + DeleteIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously deletes an index using the Delete Index API. *

* See * Delete Index API on elastic.co + * @deprecated Prefer {@link #deleteAsync(DeleteIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, DeleteIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Creates an index using the Create Index API + * Creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + CreateIndexResponse::fromXContent, emptySet()); + } + + /** + * Creates an index using the Create Index API. *

* See * Create Index API on elastic.co + * @deprecated Prefer {@link #create(CreateIndexRequest, RequestOptions)} */ + @Deprecated public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, CreateIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously creates an index using the Create Index API + * Asynchronously creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void createAsync(CreateIndexRequest createIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + CreateIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously creates an index using the Create Index API. *

* See * Create Index API on elastic.co + * @deprecated Prefer {@link #createAsync(CreateIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, CreateIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates the mappings on an index using the Put Mapping API + * Updates the mappings on an index using the Put Mapping API. + * See + * Put Mapping API on elastic.co + * @param putMappingRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + PutMappingResponse::fromXContent, emptySet()); + } + + /** + * Updates the mappings on an index using the Put Mapping API. *

* See * Put Mapping API on elastic.co + * @deprecated Prefer {@link #putMapping(PutMappingRequest, RequestOptions)} */ + @Deprecated public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, PutMappingResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates the mappings on an index using the Put Mapping API + * Asynchronously updates the mappings on an index using the Put Mapping API. + * See + * Put Mapping API on elastic.co + * @param putMappingRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + PutMappingResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates the mappings on an index using the Put Mapping API. *

* See * Put Mapping API on elastic.co + * @deprecated Prefer {@link #putMappingAsync(PutMappingRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, @@ -143,242 +235,507 @@ public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener< } /** - * Retrieves the mappings on an index or indices using the Get Mapping API - *

+ * Retrieves the mappings on an index or indices using the Get Mapping API. * See * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetMappingsResponse getMappings(GetMappingsRequest getMappingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, RequestConverters::getMappings, - GetMappingsResponse::fromXContent, emptySet(), headers); + public GetMappingsResponse getMappings(GetMappingsRequest getMappingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + GetMappingsResponse::fromXContent, emptySet()); } /** - * Asynchronously retrieves the mappings on an index on indices using the Get Mapping API - *

+ * Asynchronously retrieves the mappings on an index on indices using the Get Mapping API. * See * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getMappingsAsync(GetMappingsRequest getMappingsRequest, ActionListener listener, - Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, RequestConverters::getMappings, - GetMappingsResponse::fromXContent, listener, emptySet(), headers); + public void getMappingsAsync(GetMappingsRequest getMappingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + GetMappingsResponse::fromXContent, listener, emptySet()); } /** - * Updates aliases using the Index Aliases API + * Updates aliases using the Index Aliases API. + * See + * Index Aliases API on elastic.co + * @param indicesAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + IndicesAliasesResponse::fromXContent, emptySet()); + } + + /** + * Updates aliases using the Index Aliases API. *

* See * Index Aliases API on elastic.co + * @deprecated {@link #updateAliases(IndicesAliasesRequest, RequestOptions)} */ + @Deprecated public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates aliases using the Index Aliases API + * Asynchronously updates aliases using the Index Aliases API. + * See + * Index Aliases API on elastic.co + * @param indicesAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + IndicesAliasesResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates aliases using the Index Aliases API. *

* See * Index Aliases API on elastic.co + * @deprecated Prefer {@link #updateAliasesAsync(IndicesAliasesRequest, RequestOptions, ActionListener)} */ + @Deprecated public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener listener, - Header... headers) { + Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, listener, emptySet(), headers); } /** - * Opens an index using the Open Index API + * Opens an index using the Open Index API. + * See + * Open Index API on elastic.co + * @param openIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public OpenIndexResponse open(OpenIndexRequest openIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + OpenIndexResponse::fromXContent, emptySet()); + } + + /** + * Opens an index using the Open Index API. *

* See * Open Index API on elastic.co + * @deprecated Prefer {@link #open(OpenIndexRequest, RequestOptions)} */ + @Deprecated public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, OpenIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously opens an index using the Open Index API + * Asynchronously opens an index using the Open Index API. + * See + * Open Index API on elastic.co + * @param openIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + OpenIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously opens an index using the Open Index API. *

* See * Open Index API on elastic.co + * @deprecated Prefer {@link #openAsync(OpenIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, OpenIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Closes an index using the Close Index API + * Closes an index using the Close Index API. + * See + * Close Index API on elastic.co + * @param closeIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + CloseIndexResponse::fromXContent, emptySet()); + } + + /** + * Closes an index using the Close Index API. *

* See * Close Index API on elastic.co + * @deprecated Prefer {@link #close(CloseIndexRequest, RequestOptions)} */ + @Deprecated public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, CloseIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously closes an index using the Close Index API + * Asynchronously closes an index using the Close Index API. + * See + * Close Index API on elastic.co + * @param closeIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + CloseIndexResponse::fromXContent, listener, emptySet()); + } + + + /** + * Asynchronously closes an index using the Close Index API. *

* See * Close Index API on elastic.co + * @deprecated Prefer {@link #closeAsync(CloseIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, CloseIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Checks if one or more aliases exist using the Aliases Exist API + * Checks if one or more aliases exist using the Aliases Exist API. + * See + * Indices Aliases API on elastic.co + * @param getAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request + */ + public boolean existsAlias(GetAliasesRequest getAliasesRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias, options, + RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Checks if one or more aliases exist using the Aliases Exist API. *

* See * Indices Aliases API on elastic.co + * @deprecated Prefer {@link #existsAlias(GetAliasesRequest, RequestOptions)} */ + @Deprecated public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException { return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** - * Asynchronously checks if one or more aliases exist using the Aliases Exist API + * Asynchronously checks if one or more aliases exist using the Aliases Exist API. + * See + * Indices Aliases API on elastic.co + * @param getAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); + } + + /** + * Asynchronously checks if one or more aliases exist using the Aliases Exist API. *

* See * Indices Aliases API on elastic.co + * @deprecated Prefer {@link #existsAliasAsync(GetAliasesRequest, RequestOptions, ActionListener)} */ + @Deprecated public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } /** - * Refresh one or more indices using the Refresh API + * Refresh one or more indices using the Refresh API. + * See Refresh API on elastic.co + * @param refreshRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RefreshResponse refresh(RefreshRequest refreshRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, options, + RefreshResponse::fromXContent, emptySet()); + } + + /** + * Refresh one or more indices using the Refresh API. *
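
A refresh sketch under the same assumptions, including the async flavor; note the new async signatures place RequestOptions before the ActionListener:

    RefreshRequest refreshRequest = new RefreshRequest("twitter");
    RefreshResponse refreshResponse = client.indices().refresh(refreshRequest, RequestOptions.DEFAULT);
    int failedShards = refreshResponse.getFailedShards();

    // Async variant: options first, listener last.
    client.indices().refreshAsync(refreshRequest, RequestOptions.DEFAULT,
            new ActionListener<RefreshResponse>() {
                @Override
                public void onResponse(RefreshResponse response) { /* handle success */ }

                @Override
                public void onFailure(Exception e) { /* handle failure */ }
            });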

* See Refresh API on elastic.co + * @deprecated Prefer {@link #refresh(RefreshRequest, RequestOptions)} */ + @Deprecated public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously refresh one or more indices using the Refresh API + * Asynchronously refresh one or more indices using the Refresh API. + * See Refresh API on elastic.co + * @param refreshRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void refreshAsync(RefreshRequest refreshRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, options, + RefreshResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously refresh one or more indices using the Refresh API. *

* See Refresh API on elastic.co + * @deprecated Prefer {@link #refreshAsync(RefreshRequest, RequestOptions, ActionListener)} */ + @Deprecated public void refreshAsync(RefreshRequest refreshRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, listener, emptySet(), headers); } /** - * Flush one or more indices using the Flush API + * Flush one or more indices using the Flush API. + * See Flush API on elastic.co + * @param flushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public FlushResponse flush(FlushRequest flushRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, options, + FlushResponse::fromXContent, emptySet()); + } + + /** + * Flush one or more indices using the Flush API. *
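
Flush follows the same shape; a sketch with two commonly tuned request flags (values illustrative):

    FlushRequest flushRequest = new FlushRequest("twitter");
    flushRequest.waitIfOngoing(true); // block if another flush is already running
    flushRequest.force(false);        // do not force a flush when nothing needs flushing
    FlushResponse flushResponse = client.indices().flush(flushRequest, RequestOptions.DEFAULT);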

* See Flush API on elastic.co + * @deprecated Prefer {@link #flush(FlushRequest, RequestOptions)} */ + @Deprecated public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously flush one or more indices using the Flush API + * Asynchronously flush one or more indices using the Flush API. + * See Flush API on elastic.co + * @param flushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void flushAsync(FlushRequest flushRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, options, + FlushResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously flush one or more indices using the Flush API. *

* See Flush API on elastic.co + * @deprecated Prefer {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)} */ + @Deprecated public void flushAsync(FlushRequest flushRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, listener, emptySet(), headers); } - /** Initiate a synced flush manually using the synced flush API - *

- * See - * Synced flush API on elastic.co - */ - public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, - SyncedFlushResponse::fromXContent, emptySet(), headers); + /** + * Initiate a synced flush manually using the synced flush API. + * See + * Synced flush API on elastic.co + * @param syncedFlushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + SyncedFlushResponse::fromXContent, emptySet()); } /** - * Asynchronously initiate a synced flush manually using the synced flush API - *
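
Note that the hunk above removes the Header-based flushSynced outright rather than keeping it as a deprecated twin. A usage sketch under the same assumptions as before:

    SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest("twitter");
    SyncedFlushResponse syncedFlushResponse =
            client.indices().flushSynced(syncedFlushRequest, RequestOptions.DEFAULT);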

+ * Asynchronously initiate a synced flush manually using the synced flush API. * See * Synced flush API on elastic.co + * @param syncedFlushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, - SyncedFlushResponse::fromXContent, listener, emptySet(), headers); + public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + SyncedFlushResponse::fromXContent, listener, emptySet()); } - /** - * Retrieve the settings of one or more indices - *

+ * Retrieve the settings of one or more indices. * See * Indices Get Settings API on elastic.co + * @param getSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, RequestConverters::getSettings, - GetSettingsResponse::fromXContent, emptySet(), headers); + public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + GetSettingsResponse::fromXContent, emptySet()); } /** - * Asynchronously retrieve the settings of one or more indices - *
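
getSettings receives the same outright replacement; a sketch that reads one concrete setting back (index and setting names illustrative):

    GetSettingsRequest settingsRequest = new GetSettingsRequest().indices("twitter");
    GetSettingsResponse settingsResponse =
            client.indices().getSettings(settingsRequest, RequestOptions.DEFAULT);
    String replicas = settingsResponse.getSetting("twitter", "index.number_of_replicas");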

+ * Asynchronously retrieve the settings of one or more indices. * See * Indices Get Settings API on elastic.co + * @param getSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + GetSettingsResponse::fromXContent, listener, emptySet()); + } + + /** + * Force merge one or more indices using the Force Merge API. + * See + * Force Merge API on elastic.co + * @param forceMergeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public void getSettingsAsync(GetSettingsRequest getSettingsRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, RequestConverters::getSettings, - GetSettingsResponse::fromXContent, listener, emptySet(), headers); + public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + ForceMergeResponse::fromXContent, emptySet()); } /** - * Force merge one or more indices using the Force Merge API + * Force merge one or more indices using the Force Merge API. *
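
A force merge sketch; maxNumSegments is the usual knob and the value here is illustrative:

    ForceMergeRequest forceMergeRequest = new ForceMergeRequest("twitter");
    forceMergeRequest.maxNumSegments(1); // merge each shard down to a single segment
    ForceMergeResponse forceMergeResponse =
            client.indices().forceMerge(forceMergeRequest, RequestOptions.DEFAULT);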

* See * Force Merge API on elastic.co + * @deprecated Prefer {@link #forceMerge(ForceMergeRequest, RequestOptions)} */ + @Deprecated public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, ForceMergeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously force merge one or more indices using the Force Merge API + * Asynchronously force merge one or more indices using the Force Merge API. + * See + * Force Merge API on elastic.co + * @param forceMergeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void forceMergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + ForceMergeResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously force merge one or more indices using the Force Merge API. *

* See * Force Merge API on elastic.co + * @deprecated Prefer {@link #forceMergeAsync(ForceMergeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, ForceMergeResponse::fromXContent, listener, emptySet(), headers); } /** - * Clears the cache of one or more indices using the Clear Cache API + * Clears the cache of one or more indices using the Clear Cache API. + * See + * Clear Cache API on elastic.co + * @param clearIndicesCacheRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + ClearIndicesCacheResponse::fromXContent, emptySet()); + } + + /** + * Clears the cache of one or more indices using the Clear Cache API. *
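
A clear-cache sketch, narrowing the request to the query cache only (flag choice illustrative):

    ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest("twitter");
    clearCacheRequest.queryCache(true); // clear only the query cache
    ClearIndicesCacheResponse clearCacheResponse =
            client.indices().clearCache(clearCacheRequest, RequestOptions.DEFAULT);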

* See * Clear Cache API on elastic.co + * @deprecated Prefer {@link #clearCache(ClearIndicesCacheRequest, RequestOptions)} */ + @Deprecated public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, ClearIndicesCacheResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously clears the cache of one or more indices using the Clear Cache API + * Asynchronously clears the cache of one or more indices using the Clear Cache API. + * See + * Clear Cache API on elastic.co + * @param clearIndicesCacheRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + ClearIndicesCacheResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously clears the cache of one or more indices using the Clear Cache API. *

* See * Clear Cache API on elastic.co + * @deprecated Prefer {@link #clearCacheAsync(ClearIndicesCacheRequest, RequestOptions, ActionListener)} */ + @Deprecated public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, @@ -387,17 +744,57 @@ public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, A /** * Checks if the index (indices) exists or not. - *

* See * Indices Exists API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request */ - public boolean exists(GetIndexRequest request, Header... headers) throws IOException { + public boolean exists(GetIndexRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequest( request, RequestConverters::indicesExist, + options, RestHighLevelClient::convertExistsResponse, - Collections.emptySet(), - headers + Collections.emptySet() + ); + } + + /** + * Checks if the index (indices) exists or not. + *
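
Like existsAlias, the index-level exists call converts the response status to a boolean; a sketch:

    GetIndexRequest indexExistsRequest = new GetIndexRequest().indices("twitter");
    boolean indexExists = client.indices().exists(indexExistsRequest, RequestOptions.DEFAULT);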

+ * See + * Indices Exists API on elastic.co + * @deprecated Prefer {@link #exists(GetIndexRequest, RequestOptions)} + */ + @Deprecated + public boolean exists(GetIndexRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequest( + request, + RequestConverters::indicesExist, + RestHighLevelClient::convertExistsResponse, + Collections.emptySet(), + headers + ); + } + + /** + * Asynchronously checks if the index (indices) exists or not. + * See + * Indices Exists API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void existsAsync(GetIndexRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsync( + request, + RequestConverters::indicesExist, + options, + RestHighLevelClient::convertExistsResponse, + listener, + Collections.emptySet() ); } @@ -406,7 +803,9 @@ public boolean exists(GetIndexRequest request, Header... headers) throws IOExcep *

* See * Indices Exists API on elastic.co + * @deprecated Prefer {@link #existsAsync(GetIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void existsAsync(GetIndexRequest request, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsync( request, @@ -419,88 +818,213 @@ public void existsAsync(GetIndexRequest request, ActionListener listene } /** - * Shrinks an index using the Shrink Index API + * Shrinks an index using the Shrink Index API. + * See + * Shrink Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ResizeResponse shrink(ResizeRequest resizeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, options, + ResizeResponse::fromXContent, emptySet()); + } + + /** + * Shrinks an index using the Shrink Index API. *
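
shrink and split (below) share the same ResizeRequest and differ only in the RequestConverters method they route through. A shrink sketch (target settings illustrative; in practice the source index must be made read-only before shrinking):

    // ResizeRequest takes the target index first, then the source index.
    ResizeRequest shrinkRequest = new ResizeRequest("twitter-shrunk", "twitter");
    shrinkRequest.getTargetIndexRequest().settings(
            Settings.builder().put("index.number_of_shards", 1));
    ResizeResponse shrinkResponse = client.indices().shrink(shrinkRequest, RequestOptions.DEFAULT);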

* See * Shrink Index API on elastic.co + * @deprecated Prefer {@link #shrink(ResizeRequest, RequestOptions)} */ + @Deprecated public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously shrinks an index using the Shrink index API + * Asynchronously shrinks an index using the Shrink index API. + * See + * Shrink Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, options, + ResizeResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously shrinks an index using the Shrink index API. *

* See * Shrink Index API on elastic.co + * @deprecated Prefer {@link #shrinkAsync(ResizeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void shrinkAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, listener, emptySet(), headers); } /** - * Splits an index using the Split Index API + * Splits an index using the Split Index API. + * See + * Split Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ResizeResponse split(ResizeRequest resizeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, options, + ResizeResponse::fromXContent, emptySet()); + } + + /** + * Splits an index using the Split Index API. *

* See * Split Index API on elastic.co + * @deprecated Prefer {@link #split(ResizeRequest, RequestOptions)} */ + @Deprecated public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously splits an index using the Split Index API + * Asynchronously splits an index using the Split Index API. + * See + * Split Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, options, + ResizeResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously splits an index using the Split Index API. *

* See * Split Index API on elastic.co + * @deprecated Prefer {@link #splitAsync(ResizeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void splitAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, listener, emptySet(), headers); } /** - * Rolls over an index using the Rollover Index API + * Rolls over an index using the Rollover Index API. + * See + * Rollover Index API on elastic.co + * @param rolloverRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RolloverResponse rollover(RolloverRequest rolloverRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + RolloverResponse::fromXContent, emptySet()); + } + + /** + * Rolls over an index using the Rollover Index API. *
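
A rollover sketch; passing null for the new index name lets the server derive it from the alias, and the conditions are illustrative:

    RolloverRequest rolloverRequest = new RolloverRequest("logs-write", null);
    rolloverRequest.addMaxIndexDocsCondition(10000);
    rolloverRequest.addMaxIndexAgeCondition(TimeValue.timeValueDays(7));
    RolloverResponse rolloverResponse =
            client.indices().rollover(rolloverRequest, RequestOptions.DEFAULT);
    boolean rolledOver = rolloverResponse.isRolledOver();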

* See * Rollover Index API on elastic.co + * @deprecated Prefer {@link #rollover(RolloverRequest, RequestOptions)} */ + @Deprecated public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously rolls over an index using the Rollover Index API + * Asynchronously rolls over an index using the Rollover Index API. + * See + * Rollover Index API on elastic.co + * @param rolloverRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void rolloverAsync(RolloverRequest rolloverRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + RolloverResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously rolls over an index using the Rollover Index API. *

* See * Rollover Index API on elastic.co + * @deprecated Prefer {@link #rolloverAsync(RolloverRequest, RequestOptions, ActionListener)} */ + @Deprecated public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates specific index level settings using the Update Indices Settings API + * Updates specific index level settings using the Update Indices Settings API. + * See Update Indices Settings + * API on elastic.co + * @param updateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + UpdateSettingsResponse::fromXContent, emptySet()); + } + + /** + * Updates specific index level settings using the Update Indices Settings API. *
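
An update-settings sketch, bumping a dynamic setting (value illustrative):

    UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest("twitter");
    updateSettingsRequest.settings(Settings.builder().put("index.number_of_replicas", 2));
    UpdateSettingsResponse updateSettingsResponse =
            client.indices().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);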

* See Update Indices Settings * API on elastic.co + * @deprecated Prefer {@link #putSettings(UpdateSettingsRequest, RequestOptions)} */ + @Deprecated public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, UpdateSettingsResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates specific index level settings using the Update Indices Settings API + * Asynchronously updates specific index level settings using the Update Indices Settings API. + * See Update Indices Settings + * API on elastic.co + * @param updateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + UpdateSettingsResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates specific index level settings using the Update Indices Settings API. *

* See Update Indices Settings * API on elastic.co + * @deprecated Prefer {@link #putSettingsAsync(UpdateSettingsRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, @@ -508,25 +1032,31 @@ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, Action } /** - * Puts an index template using the Index Templates API - *

+ * Puts an index template using the Index Templates API. * See Index Templates API * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public PutIndexTemplateResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, - PutIndexTemplateResponse::fromXContent, emptySet(), headers); + public PutIndexTemplateResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + PutIndexTemplateResponse::fromXContent, emptySet()); } /** - * Asynchronously puts an index template using the Index Templates API - *
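
putTemplate is likewise replaced outright in this hunk rather than deprecated. A sketch (template name, patterns and settings illustrative; Arrays is java.util.Arrays):

    PutIndexTemplateRequest templateRequest = new PutIndexTemplateRequest("logs-template");
    templateRequest.patterns(Arrays.asList("logs-*"));
    templateRequest.settings(Settings.builder().put("index.number_of_shards", 3));
    PutIndexTemplateResponse templateResponse =
            client.indices().putTemplate(templateRequest, RequestOptions.DEFAULT);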

+ * Asynchronously puts an index template using the Index Templates API. * See Index Templates API * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, - PutIndexTemplateResponse::fromXContent, listener, emptySet(), headers); + public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + PutIndexTemplateResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 72b1813f93909..5c5a82b52f438 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; @@ -45,70 +44,85 @@ public final class IngestClient { } /** - * Add a pipeline or update an existing pipeline - *

+ * Add a pipeline or update an existing pipeline. * See * Put Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public WritePipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, - WritePipelineResponse::fromXContent, emptySet(), headers); + public WritePipelineResponse putPipeline(PutPipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, options, + WritePipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously add a pipeline or update an existing pipeline - *
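
On the IngestClient side the pattern is identical; a put-pipeline sketch with an inline JSON definition (pipeline id and processor illustrative):

    String pipelineSource = "{\"description\":\"rename a field\","
            + "\"processors\":[{\"rename\":{\"field\":\"src\",\"target_field\":\"dest\"}}]}";
    PutPipelineRequest putPipelineRequest = new PutPipelineRequest(
            "my-pipeline", new BytesArray(pipelineSource), XContentType.JSON);
    WritePipelineResponse putPipelineResponse =
            client.ingest().putPipeline(putPipelineRequest, RequestOptions.DEFAULT);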

+ * Asynchronously add a pipeline or update an existing pipeline. * See * Put Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void putPipelineAsync(PutPipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, - WritePipelineResponse::fromXContent, listener, emptySet(), headers); + public void putPipelineAsync(PutPipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, options, + WritePipelineResponse::fromXContent, listener, emptySet()); } /** - * Get an existing pipeline - *

+ * Get an existing pipeline. * See * Get Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, - GetPipelineResponse::fromXContent, emptySet(), headers); + public GetPipelineResponse getPipeline(GetPipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, options, + GetPipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously get an existing pipeline - *

+ * Asynchronously get an existing pipeline. * See * Get Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getPipelineAsync(GetPipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, - GetPipelineResponse::fromXContent, listener, emptySet(), headers); + public void getPipelineAsync(GetPipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, options, + GetPipelineResponse::fromXContent, listener, emptySet()); } /** - * Delete an existing pipeline - *

+ * Delete an existing pipeline. * See * * Delete Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public WritePipelineResponse deletePipeline(DeletePipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, - WritePipelineResponse::fromXContent, emptySet(), headers); + public WritePipelineResponse deletePipeline(DeletePipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, options, + WritePipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously delete an existing pipeline - *

+ * Asynchronously delete an existing pipeline. * See * * Delete Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void deletePipelineAsync(DeletePipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, - WritePipelineResponse::fromXContent, listener, emptySet(), headers); + public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options, + WritePipelineResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index cceb208e9268e..308b8917842d1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -721,7 +721,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { return request; } - static Request getSettings(GetSettingsRequest getSettingsRequest) throws IOException { + static Request getSettings(GetSettingsRequest getSettingsRequest) { String[] indices = getSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.indices(); String[] names = getSettingsRequest.names() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.names(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index a9587b73c1959..8980508c48738 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -285,16 +285,19 @@ public final TasksClient tasks() { } /** - * Executes a bulk request using the Bulk API - * + * Executes a bulk request using the Bulk API. * See Bulk API on elastic.co + * @param bulkRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet()); } /** - * Executes a bulk request using the Bulk API + * Executes a bulk request using the Bulk API. * * See Bulk API on elastic.co * @deprecated Prefer {@link #bulk(BulkRequest, RequestOptions)} @@ -305,16 +308,18 @@ public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throw } /** - * Asynchronously executes a bulk request using the Bulk API - * + * Asynchronously executes a bulk request using the Bulk API. * See Bulk API on elastic.co + * @param bulkRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); } /** - * Asynchronously executes a bulk request using the Bulk API + * Asynchronously executes a bulk request using the Bulk API. * * See Bulk API on elastic.co * @deprecated Prefer {@link #bulkAsync(BulkRequest, RequestOptions, ActionListener)} @@ -326,194 +331,482 @@ public final void bulkAsync(BulkRequest bulkRequest, ActionListenertrue if the ping succeeded, false otherwise + * @throws IOException in case there is a problem sending the request + */ + public final boolean ping(RequestOptions options) throws IOException { + return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), options, RestHighLevelClient::convertExistsResponse, + emptySet()); + } + + /** + * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise + * @deprecated Prefer {@link #ping(RequestOptions)} */ + @Deprecated public final boolean ping(Header... headers) throws IOException { return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse, emptySet(), headers); } + /** + * Get the cluster info otherwise provided when sending an HTTP request to '/' + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final MainResponse info(RequestOptions options) throws IOException { + return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), options, + MainResponse::fromXContent, emptySet()); + } + /** * Get the cluster info otherwise provided when sending an HTTP request to port 9200 + * @deprecated Prefer {@link #info(RequestOptions)} */ + @Deprecated public final MainResponse info(Header... headers) throws IOException { return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), MainResponse::fromXContent, emptySet(), headers); } /** - * Retrieves a document by id using the Get API + * Retrieves a document by id using the Get API. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final GetResponse get(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404)); + } + + /** + * Retrieves a document by id using the Get API. * * See Get API on elastic.co + * @deprecated Prefer {@link #get(GetRequest, RequestOptions)} */ + @Deprecated public final GetResponse get(GetRequest getRequest, Header... 
headers) throws IOException { return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously retrieves a document by id using the Get API + * Asynchronously retrieves a document by id using the Get API. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void getAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, listener, + singleton(404)); + } + + /** + * Asynchronously retrieves a document by id using the Get API. * * See Get API on elastic.co + * @deprecated Prefer {@link #getAsync(GetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void getAsync(GetRequest getRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener, singleton(404), headers); } /** - * Retrieves multiple documents by id using the Multi Get API + * Retrieves multiple documents by id using the Multi Get API. + * See Multi Get API on elastic.co + * @param multiGetRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, + singleton(404)); + } + + /** + * Retrieves multiple documents by id using the Multi Get API. * * See Multi Get API on elastic.co + * @deprecated Prefer {@link #multiGet(MultiGetRequest, RequestOptions)} */ + @Deprecated public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException { return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously retrieves multiple documents by id using the Multi Get API + * Asynchronously retrieves multiple documents by id using the Multi Get API. + * See Multi Get API on elastic.co + * @param multiGetRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void multiGetAsync(MultiGetRequest multiGetRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, listener, + singleton(404)); + } + + /** + * Asynchronously retrieves multiple documents by id using the Multi Get API. * * See Multi Get API on elastic.co + * @deprecated Prefer {@link #multiGetAsync(MultiGetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header... 
headers) { performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener, singleton(404), headers); } /** - * Checks for the existence of a document. Returns true if it exists, false otherwise + * Checks for the existence of a document. Returns true if it exists, false otherwise. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return true if the document exists, false otherwise + * @throws IOException in case there is a problem sending the request + */ + public final boolean exists(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Checks for the existence of a document. Returns true if it exists, false otherwise. * * See Get API on elastic.co + * @deprecated Prefer {@link #exists(GetRequest, RequestOptions)} */ + @Deprecated public final boolean exists(GetRequest getRequest, Header... headers) throws IOException { return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** - * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise + * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void existsAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsync(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, listener, + emptySet()); + } + + /** + * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise. * * See Get API on elastic.co + * @deprecated Prefer {@link #existsAsync(GetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void existsAsync(GetRequest getRequest, ActionListener listener, Header... headers) { performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } /** - * Index a document using the Index API + * Index a document using the Index API. + * See Index API on elastic.co + * @param indexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); + } + + /** + * Index a document using the Index API. * * See Index API on elastic.co + * @deprecated Prefer {@link #index(IndexRequest, RequestOptions)} */ + @Deprecated public final IndexResponse index(IndexRequest indexRequest, Header... 
headers) throws IOException { return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously index a document using the Index API + * Asynchronously index a document using the Index API. + * See Index API on elastic.co + * @param indexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void indexAsync(IndexRequest indexRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously index a document using the Index API. * * See Index API on elastic.co + * @deprecated Prefer {@link #indexAsync(IndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void indexAsync(IndexRequest indexRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates a document using the Update API + * Updates a document using the Update API. + * See Update API on elastic.co + * @param updateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final UpdateResponse update(UpdateRequest updateRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet()); + } + + /** + * Updates a document using the Update API. *
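
For the single-document APIs, note from the hunks above that get passes singleton(404), so a missing document surfaces as isExists() == false rather than as an exception. An index-then-get sketch (index, type and id illustrative):

    IndexRequest indexRequest = new IndexRequest("posts", "doc", "1")
            .source(XContentType.JSON, "user", "kimchy");
    IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);

    GetRequest getRequest = new GetRequest("posts", "doc", "1");
    GetResponse getResponse = client.get(getRequest, RequestOptions.DEFAULT);
    String source = getResponse.isExists() ? getResponse.getSourceAsString() : null;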

* See Update API on elastic.co + * @deprecated Prefer {@link #update(UpdateRequest, RequestOptions)} */ + @Deprecated public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates a document using the Update API + * Asynchronously updates a document using the Update API. + * See Update API on elastic.co + * @param updateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void updateAsync(UpdateRequest updateRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously updates a document using the Update API. *
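
An update sketch; the delete variant introduced just below follows the same request/options/response shape:

    UpdateRequest updateRequest = new UpdateRequest("posts", "doc", "1")
            .doc(XContentType.JSON, "updated", true);
    UpdateResponse updateResponse = client.update(updateRequest, RequestOptions.DEFAULT);

    DeleteRequest deleteRequest = new DeleteRequest("posts", "doc", "1");
    DeleteResponse deleteResponse = client.delete(deleteRequest, RequestOptions.DEFAULT);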

* See Update API on elastic.co + * @deprecated Prefer {@link #updateAsync(UpdateRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener, emptySet(), headers); } /** - * Deletes a document by id using the Delete API + * Deletes a document by id using the Delete API. + * See Delete API on elastic.co + * @param deleteRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final DeleteResponse delete(DeleteRequest deleteRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, + singleton(404)); + } + + /** + * Deletes a document by id using the Delete API. * * See Delete API on elastic.co + * @deprecated Prefer {@link #delete(DeleteRequest, RequestOptions)} */ + @Deprecated public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously deletes a document by id using the Delete API + * Asynchronously deletes a document by id using the Delete API. + * See Delete API on elastic.co + * @param deleteRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void deleteAsync(DeleteRequest deleteRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, listener, + Collections.singleton(404)); + } + + /** + * Asynchronously deletes a document by id using the Delete API. * * See Delete API on elastic.co + * @deprecated Prefer {@link #deleteAsync(DeleteRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener, - Collections.singleton(404), headers); + Collections.singleton(404), headers); + } + + /** + * Executes a search request using the Search API. + * See Search API on elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, emptySet()); } /** - * Executes a search using the Search API + * Executes a search using the Search API.
* * See Search API on elastic.co + * @deprecated Prefer {@link #search(SearchRequest, RequestOptions)} */ + @Deprecated public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException { return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously executes a search using the Search API + * Asynchronously executes a search using the Search API. + * See Search API on elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously executes a search using the Search API. * * See Search API on elastic.co + * @deprecated Prefer {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void searchAsync(SearchRequest searchRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener, emptySet(), headers); } /** - * Executes a multi search using the msearch API + * Executes a multi search using the msearch API. + * See Multi search API on + * elastic.co + * @param multiSearchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, + emptySet()); + } + + /** + * Executes a multi search using the msearch API. * * See Multi search API on * elastic.co + * @deprecated Prefer {@link #multiSearch(MultiSearchRequest, RequestOptions)} */ + @Deprecated public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException { return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, emptySet(), headers); } /** - * Asynchronously executes a multi search using the msearch API + * Asynchronously executes a multi search using the msearch API. + * See Multi search API on + * elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void multiSearchAsync(MultiSearchRequest searchRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, + listener, emptySet()); + } + + /** + * Asynchronously executes a multi search using the msearch API. 
* * See Multi search API on * elastic.co + * @deprecated Prefer {@link #multiSearchAsync(MultiSearchRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener, emptySet(), headers); } /** - * Executes a search using the Search Scroll API + * Executes a search using the Search Scroll API. + * See Search Scroll + * API on elastic.co + * @param searchScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, + emptySet()); + } + + /** + * Executes a search using the Search Scroll API. * * See Search Scroll * API on elastic.co + * @deprecated Prefer {@link #searchScroll(SearchScrollRequest, RequestOptions)} */ + @Deprecated public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously executes a search using the Search Scroll API + * Asynchronously executes a search using the Search Scroll API. + * See Search Scroll + * API on elastic.co + * @param searchScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, + listener, emptySet()); + } + + /** + * Asynchronously executes a search using the Search Scroll API. * * See Search Scroll * API on elastic.co + * @deprecated Prefer {@link #searchScrollAsync(SearchScrollRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, @@ -521,22 +814,54 @@ public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, } /** - * Clears one or more scroll ids using the Clear Scroll API + * Clears one or more scroll ids using the Clear Scroll API. + * See + * Clear Scroll API on elastic.co + * @param clearScrollRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, + emptySet()); + } + + /** + * Clears one or more scroll ids using the Clear Scroll API. * * See * Clear Scroll API on elastic.co + * @deprecated Prefer {@link #clearScroll(ClearScrollRequest, RequestOptions)} */ + @Deprecated public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously clears one or more scroll ids using the Clear Scroll API + * Asynchronously clears one or more scroll ids using the Clear Scroll API. + * See + * Clear Scroll API on elastic.co + * @param clearScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, + listener, emptySet()); + } + + /** + * Asynchronously clears one or more scroll ids using the Clear Scroll API. * * See * Clear Scroll API on elastic.co + * @deprecated Prefer {@link #clearScrollAsync(ClearScrollRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, @@ -545,47 +870,79 @@ public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, /** * Executes a request using the Search Template API. - * * See Search Template API * on elastic.co. + * @param searchTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest, - Header... headers) throws IOException { - return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, - SearchTemplateResponse::fromXContent, emptySet(), headers); + RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, + SearchTemplateResponse::fromXContent, emptySet()); } /** - * Asynchronously executes a request using the Search Template API + * Asynchronously executes a request using the Search Template API. * * See Search Template API * on elastic.co. */ - public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, - ActionListener listener, - Header...
headers) { - performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, - SearchTemplateResponse::fromXContent, listener, emptySet(), headers); + public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, + SearchTemplateResponse::fromXContent, listener, emptySet()); } + /** + * Executes a request using the Ranking Evaluation API. + * See Ranking Evaluation API + * on elastic.co + * @param rankEvalRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, + emptySet()); + } /** * Executes a request using the Ranking Evaluation API. * * See Ranking Evaluation API * on elastic.co + * @deprecated Prefer {@link #rankEval(RankEvalRequest, RequestOptions)} */ + @Deprecated public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException { return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, emptySet(), headers); } + /** + * Asynchronously executes a request using the Ranking Evaluation API. + * See Ranking Evaluation API + * on elastic.co + * @param rankEvalRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void rankEvalAsync(RankEvalRequest rankEvalRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, listener, + emptySet()); + } + /** * Asynchronously executes a request using the Ranking Evaluation API. * * See Ranking Evaluation API * on elastic.co + * @deprecated Prefer {@link #rankEvalAsync(RankEvalRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener, emptySet(), headers); @@ -593,27 +950,31 @@ public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener< /** * Executes a request using the Field Capabilities API. - * * See Field Capabilities API * on elastic.co. + * @param fieldCapabilitiesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, - Header... 
headers) throws IOException { - return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, - FieldCapabilitiesResponse::fromXContent, emptySet(), headers); + RequestOptions options) throws IOException { + return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, + FieldCapabilitiesResponse::fromXContent, emptySet()); } /** * Asynchronously executes a request using the Field Capabilities API. - * * See Field Capabilities API * on elastic.co. + * @param fieldCapabilitiesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, - ActionListener listener, - Header... headers) { - performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, - FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); + public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, + FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } @Deprecated diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index 104bc91271148..b7cd2d52732cc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; @@ -49,97 +48,117 @@ public final class SnapshotClient { /** * Gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all * registered repositories are returned. - *

* See Snapshot and Restore * API on elastic.co + * @param getRepositoriesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetRepositoriesResponse getRepositories(GetRepositoriesRequest getRepositoriesRequest, Header... headers) + public GetRepositoriesResponse getRepositories(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, - GetRepositoriesResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + GetRepositoriesResponse::fromXContent, emptySet()); } /** * Asynchronously gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all * registered repositories are returned. - *

* See Snapshot and Restore * API on elastic.co + * @param getRepositoriesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, - GetRepositoriesResponse::fromXContent, listener, emptySet(), headers); + public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + GetRepositoriesResponse::fromXContent, listener, emptySet()); } /** * Creates a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param putRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, - PutRepositoryResponse::fromXContent, emptySet(), headers); + public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + PutRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously creates a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param putRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, - PutRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + PutRepositoryResponse::fromXContent, listener, emptySet()); } /** * Deletes a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param deleteRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, Header... headers) + public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, - DeleteRepositoryResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, + DeleteRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously deletes a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param deleteRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, - DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, + DeleteRepositoryResponse::fromXContent, listener, emptySet()); } /** * Verifies a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param verifyRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, Header... headers) + public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, - VerifyRepositoryResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, + VerifyRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously verifies a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param verifyRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, - VerifyRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, + VerifyRepositoryResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index 214f1e7884a2a..f4a76e78b946b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -33,7 +32,7 @@ *

* See Task Management API on elastic.co */ -public class TasksClient { +public final class TasksClient { private final RestHighLevelClient restHighLevelClient; TasksClient(RestHighLevelClient restHighLevelClient) { @@ -41,24 +40,29 @@ public class TasksClient { } /** - * Get current tasks using the Task Management API - *

+ * Get current tasks using the Task Management API. * See * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public ListTasksResponse list(ListTasksRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - emptySet(), headers); + public ListTasksResponse list(ListTasksRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, options, + ListTasksResponse::fromXContent, emptySet()); } /** - * Asynchronously get current tasks using the Task Management API - *

+ * Asynchronously get current tasks using the Task Management API. * See * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void listAsync(ListTasksRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - listener, emptySet(), headers); + public void listAsync(ListTasksRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, options, + ListTasksResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 9782b1016b421..d41c47177f968 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -20,8 +20,6 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; @@ -39,7 +37,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -81,7 +78,7 @@ public void testThatBulkProcessorCountIsCorrect() throws Exception { assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } } @@ -107,7 +104,7 @@ public void testBulkProcessorFlush() throws Exception { assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } } @@ -159,7 +156,7 @@ public void testBulkProcessorConcurrentRequests() throws Exception { assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); } - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } public void testBulkProcessorWaitOnClose() throws Exception { @@ -190,7 +187,7 @@ public void testBulkProcessorWaitOnClose() throws Exception { } assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { @@ 
-267,7 +264,7 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception } } - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), testDocs); } private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java index 597d35a99967b..fe6aa6b1017ee 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java @@ -127,8 +127,8 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } } - highLevelClient().indices().refresh(new RefreshRequest()); - int multiGetResponsesCount = highLevelClient().multiGet(multiGetRequest).getResponses().length; + highLevelClient().indices().refresh(new RefreshRequest(), RequestOptions.DEFAULT); + int multiGetResponsesCount = highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT).getResponses().length; if (rejectedExecutionExpected) { assertThat(multiGetResponsesCount, lessThanOrEqualTo(numberOfAsyncOps)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 9314bb2e36cea..f1110163b2517 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -57,6 +57,7 @@ public void testClusterPutSettings() throws IOException { setRequest.persistentSettings(map); ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync); assertAcked(setResponse); @@ -79,6 +80,7 @@ public void testClusterPutSettings() throws IOException { resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON); ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync); assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null)); @@ -100,6 +102,7 @@ public void testClusterUpdateSettingNonExistent() { clusterUpdateSettingsRequest.transientSettings(Settings.builder().put(setting, value).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(clusterUpdateSettingsRequest, + highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync)); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 8595bd16b63be..d4ebcfd70713d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -68,12 +68,14 @@ public void testDelete() throws IOException { { // Testing deletion String docId = "id"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); if (randomBoolean()) { deleteRequest.version(1L); } - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -83,7 +85,8 @@ public void testDelete() throws IOException { // Testing non existing document String docId = "does_not_exist"; DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -92,10 +95,12 @@ public void testDelete() throws IOException { { // Testing version conflict String docId = "version_conflict"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); + () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + docId + "]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); @@ -104,10 +109,12 @@ public void testDelete() throws IOException { { // Testing version type String docId = "version_type"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) - .versionType(VersionType.EXTERNAL).version(12)); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(13); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + 
highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -116,11 +123,13 @@ public void testDelete() throws IOException { { // Testing version type with a wrong version String docId = "wrong_version"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) - .versionType(VersionType.EXTERNAL).version(12)); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(10); - execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + @@ -130,9 +139,11 @@ public void testDelete() throws IOException { { // Testing routing String docId = "routing"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo")); + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo"), + RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).routing("foo"); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -143,23 +154,27 @@ public void testDelete() throws IOException { public void testExists() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } IndexRequest index = new IndexRequest("index", "type", "id"); index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - highLevelClient().index(index); + highLevelClient().index(index, RequestOptions.DEFAULT); { GetRequest getRequest = new GetRequest("index", "type", "id"); - assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + 
highLevelClient()::exists, highLevelClient()::existsAsync)); } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } } @@ -167,7 +182,8 @@ public void testGet() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); + () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -176,11 +192,12 @@ public void testGet() throws IOException { String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; index.source(document, XContentType.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - highLevelClient().index(index); + highLevelClient().index(index, RequestOptions.DEFAULT); { GetRequest getRequest = new GetRequest("index", "type", "id").version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); + () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[type][id]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); @@ -191,7 +208,8 @@ public void testGet() throws IOException { if (randomBoolean()) { getRequest.version(1L); } - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -202,7 +220,8 @@ public void testGet() throws IOException { } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("does_not_exist", getResponse.getId()); @@ -214,7 +233,8 @@ public void testGet() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); getRequest.fetchSourceContext(new FetchSourceContext(false, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)); - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, 
highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -230,7 +250,8 @@ public void testGet() throws IOException { } else { getRequest.fetchSourceContext(new FetchSourceContext(true, Strings.EMPTY_ARRAY, new String[]{"field2"})); } - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -248,7 +269,8 @@ public void testMultiGet() throws IOException { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); multiGetRequest.add("index", "type", "id2"); - MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync, + highLevelClient()::multiGet, highLevelClient()::multiGetAsync); assertEquals(2, response.getResponses().length); assertTrue(response.getResponses()[0].isFailed()); @@ -275,12 +297,13 @@ public void testMultiGet() throws IOException { index = new IndexRequest("index", "type", "id2"); index.source("{\"field\":\"value2\"}", XContentType.JSON); bulk.add(index); - highLevelClient().bulk(bulk); + highLevelClient().bulk(bulk, RequestOptions.DEFAULT); { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); multiGetRequest.add("index", "type", "id2"); - MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync, + highLevelClient()::multiGet, highLevelClient()::multiGetAsync); assertEquals(2, response.getResponses().length); assertFalse(response.getResponses()[0].isFailed()); @@ -305,7 +328,8 @@ public void testIndex() throws IOException { IndexRequest indexRequest = new IndexRequest("index", "type"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("test", "test").endObject()); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); assertEquals("index", indexResponse.getIndex()); @@ -326,7 +350,8 @@ public void testIndex() throws IOException { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 1).endObject()); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); 
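The repeated four-argument execute(...) calls in these test hunks funnel each request through a helper that accepts both the new RequestOptions variants and the deprecated Header... variants, so a single test can exercise whichever path is chosen at runtime. The helper lives in ESRestHighLevelClientTestCase and is not shown in this patch; the following is a reduced sketch of the idea, with assumed interface names and the async variants elided:

    import java.io.IOException;
    import org.apache.http.Header;
    import org.elasticsearch.client.RequestOptions;

    public class ExecuteHelperSketch {

        @FunctionalInterface
        interface SyncMethod<Req, Resp> {
            Resp execute(Req request, RequestOptions options) throws IOException;
        }

        @FunctionalInterface
        interface SyncMethodWithHeaders<Req, Resp> {
            Resp execute(Req request, Header... headers) throws IOException;
        }

        // Randomly picks the options-based or the header-based code path, so both
        // keep getting coverage until the deprecated overloads are removed.
        static <Req, Resp> Resp execute(Req request,
                                        SyncMethod<Req, Resp> withOptions,
                                        SyncMethodWithHeaders<Req, Resp> withHeaders) throws IOException {
            if (Math.random() < 0.5) {
                return withOptions.execute(request, RequestOptions.DEFAULT);
            }
            return withHeaders.execute(request);
        }
    }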
assertEquals("type", indexResponse.getType()); @@ -336,7 +361,8 @@ public void testIndex() throws IOException { indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 2).endObject()); - indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.OK, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); @@ -348,7 +374,8 @@ public void testIndex() throws IOException { wrongRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); wrongRequest.version(5L); - execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: " + @@ -374,7 +401,8 @@ public void testIndex() throws IOException { indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); indexRequest.setPipeline("missing"); - execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.BAD_REQUEST, exception.status()); @@ -387,7 +415,8 @@ public void testIndex() throws IOException { indexRequest.version(12L); indexRequest.versionType(VersionType.EXTERNAL); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); @@ -399,14 +428,16 @@ public void testIndex() throws IOException { indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); indexRequest.opType(DocWriteRequest.OpType.CREATE); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("with_create_op_type", indexResponse.getId()); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { - execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); @@ -421,7 +452,8 @@ public void testUpdate() throws IOException { 
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> - execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync)); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]", exception.getMessage()); @@ -429,7 +461,7 @@ public void testUpdate() throws IOException { { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(singletonMap("field", "value")); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); @@ -444,7 +476,8 @@ public void testUpdate() throws IOException { updateRequestConflict.version(indexResponse.getVersion()); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> - execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync)); + execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " + "current version [2] is different than the one provided [1]]", exception.getMessage()); @@ -468,7 +501,7 @@ public void testUpdate() throws IOException { { IndexRequest indexRequest = new IndexRequest("index", "type", "with_script"); indexRequest.source(singletonMap("counter", 12)); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script"); @@ -476,7 +509,8 @@ public void testUpdate() throws IOException { updateRequest.script(script); updateRequest.fetchSource(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(2L, updateResponse.getVersion()); @@ -488,7 +522,7 @@ public void testUpdate() throws IOException { indexRequest.source("field_1", "one", "field_3", "three"); indexRequest.version(12L); indexRequest.versionType(VersionType.EXTERNAL); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(12L, indexResponse.getVersion()); @@ -496,7 +530,8 @@ public void testUpdate() throws IOException { updateRequest.doc(singletonMap("field_2", "two"), 
randomFrom(XContentType.values())); updateRequest.fetchSource("field_*", "field_3"); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(13L, updateResponse.getVersion()); @@ -510,14 +545,15 @@ public void testUpdate() throws IOException { { IndexRequest indexRequest = new IndexRequest("index", "type", "noop"); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(1L, indexResponse.getVersion()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop"); updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); assertEquals(1L, updateResponse.getVersion()); @@ -535,7 +571,8 @@ public void testUpdate() throws IOException { updateRequest.doc(singletonMap("doc_status", "updated")); updateRequest.fetchSource(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -550,7 +587,8 @@ public void testUpdate() throws IOException { updateRequest.fetchSource(true); updateRequest.docAsUpsert(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -566,7 +604,8 @@ public void testUpdate() throws IOException { updateRequest.scriptedUpsert(true); updateRequest.upsert(singletonMap("level", "A")); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -581,7 +620,8 @@ public void testUpdate() throws IOException { UpdateRequest updateRequest = new UpdateRequest("index", 
"type", "id"); updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML)); - execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); }); assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", exception.getMessage()); @@ -604,7 +644,8 @@ public void testBulk() throws IOException { if (opType == DocWriteRequest.OpType.DELETE) { if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); bulkRequest.add(deleteRequest); @@ -622,7 +663,7 @@ public void testBulk() throws IOException { } else if (opType == DocWriteRequest.OpType.CREATE) { IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true); if (erroneous) { - assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } bulkRequest.add(createRequest); @@ -631,14 +672,16 @@ public void testBulk() throws IOException { .doc(new IndexRequest().source(source, xContentType)); if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } bulkRequest.add(updateRequest); } } } - BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync); + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, + highLevelClient()::bulk, highLevelClient()::bulkAsync); assertEquals(RestStatus.OK, bulkResponse.status()); assertTrue(bulkResponse.getTook().getMillis() > 0); assertEquals(nbItems, bulkResponse.getItems().length); @@ -691,7 +734,8 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) if (opType == DocWriteRequest.OpType.DELETE) { if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); processor.add(deleteRequest); @@ -707,7 +751,7 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } else if (opType == DocWriteRequest.OpType.CREATE) { IndexRequest createRequest = new IndexRequest("index", "test", id).source(xContentType, "id", i).create(true); if (erroneous) { - assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } processor.add(createRequest); @@ -716,7 +760,8 @@ public void afterBulk(long executionId, BulkRequest 
request, Throwable failure) .doc(new IndexRequest().source(xContentType, "id", i)); if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } processor.add(updateRequest); } @@ -768,14 +813,14 @@ public void testUrlEncode() throws IOException { { IndexRequest indexRequest = new IndexRequest(indexPattern, "type", "id#1"); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(expectedIndex, indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("id#1", indexResponse.getId()); } { GetRequest getRequest = new GetRequest(indexPattern, "type", "id#1"); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals(expectedIndex, getResponse.getIndex()); assertEquals("type", getResponse.getType()); @@ -786,21 +831,21 @@ public void testUrlEncode() throws IOException { { IndexRequest indexRequest = new IndexRequest("index", "type", docId); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals(docId, indexResponse.getId()); } { GetRequest getRequest = new GetRequest("index", "type", docId); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals(docId, getResponse.getId()); } - assertTrue(highLevelClient().indices().exists(new GetIndexRequest().indices(indexPattern, "index"))); + assertTrue(highLevelClient().indices().exists(new GetIndexRequest().indices(indexPattern, "index"), RequestOptions.DEFAULT)); } public void testParamsEncode() throws IOException { @@ -810,14 +855,14 @@ public void testParamsEncode() throws IOException { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source("field", "value"); indexRequest.routing(routing); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("id", indexResponse.getId()); } { GetRequest getRequest = new GetRequest("index", "type", "id").routing(routing); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index f7a934405c2ae..14fe0e01d31f9 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
@@ -60,23 +60,60 @@ protected static RestHighLevelClient highLevelClient() {
      * Executes the provided request using either the sync method or its async variant, both provided as functions
      */
     protected static <Req, Resp> Resp execute(Req request, SyncMethod<Req, Resp> syncMethod,
-                                              AsyncMethod<Req, Resp> asyncMethod, Header... headers) throws IOException {
+                                              AsyncMethod<Req, Resp> asyncMethod) throws IOException {
         if (randomBoolean()) {
-            return syncMethod.execute(request, headers);
+            return syncMethod.execute(request, RequestOptions.DEFAULT);
         } else {
             PlainActionFuture<Resp> future = PlainActionFuture.newFuture();
-            asyncMethod.execute(request, future, headers);
+            asyncMethod.execute(request, RequestOptions.DEFAULT, future);
             return future.actionGet();
         }
     }
 
     @FunctionalInterface
     protected interface SyncMethod<Request, Response> {
-        Response execute(Request request, Header... headers) throws IOException;
+        Response execute(Request request, RequestOptions options) throws IOException;
     }
 
     @FunctionalInterface
     protected interface AsyncMethod<Request, Response> {
+        void execute(Request request, RequestOptions options, ActionListener<Response> listener);
+    }
+
+    /**
+     * Executes the provided request using either the sync method or its async variant, both provided as functions
+     */
+    @Deprecated
+    protected static <Req, Resp> Resp execute(Req request, SyncMethod<Req, Resp> syncMethod, AsyncMethod<Req, Resp> asyncMethod,
+                                              SyncMethodWithHeaders<Req, Resp> syncMethodWithHeaders,
+                                              AsyncMethodWithHeaders<Req, Resp> asyncMethodWithHeaders) throws IOException {
+        switch(randomIntBetween(0, 3)) {
+            case 0:
+                return syncMethod.execute(request, RequestOptions.DEFAULT);
+            case 1:
+                PlainActionFuture<Resp> future = PlainActionFuture.newFuture();
+                asyncMethod.execute(request, RequestOptions.DEFAULT, future);
+                return future.actionGet();
+            case 2:
+                return syncMethodWithHeaders.execute(request);
+            case 3:
+                PlainActionFuture<Resp> futureWithHeaders = PlainActionFuture.newFuture();
+                asyncMethodWithHeaders.execute(request, futureWithHeaders);
+                return futureWithHeaders.actionGet();
+            default:
+                throw new UnsupportedOperationException();
+        }
+    }
+
+    @Deprecated
+    @FunctionalInterface
+    protected interface SyncMethodWithHeaders<Request, Response> {
+        Response execute(Request request, Header... headers) throws IOException;
+    }
+
+    @Deprecated
+    @FunctionalInterface
+    protected interface AsyncMethodWithHeaders<Request, Response> {
         void execute(Request request, ActionListener<Response> listener, Header... headers);
     }
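The helper above is the crux of the migration: every call site now supplies the RequestOptions-based method references and, until removal, the deprecated Header-based ones, and the randomIntBetween(0, 3) switch keeps all four entry points covered by the same tests. Outside the test suite, client code migrates by passing RequestOptions explicitly. A minimal sketch under assumed conditions (the localhost:9200 endpoint and the "posts" index, type, and field names are illustrative, not part of the patch):

    import org.apache.http.HttpHost;
    import org.elasticsearch.action.index.IndexRequest;
    import org.elasticsearch.action.index.IndexResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestClient;
    import org.elasticsearch.client.RestHighLevelClient;

    public class RequestOptionsMigrationSketch {
        public static void main(String[] args) throws Exception {
            // Assumed endpoint; any reachable 6.x cluster would do.
            try (RestHighLevelClient client = new RestHighLevelClient(
                    RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
                IndexRequest request = new IndexRequest("posts", "doc", "1")
                        .source("field", "value");
                // Before this change: client.index(request) or client.index(request, header).
                // After it: the blocking variant takes RequestOptions explicitly.
                IndexResponse response = client.index(request, RequestOptions.DEFAULT);
                System.out.println(response.status());
            }
        }
    }
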
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
index b7fccd41c36bc..87d5120116fb4 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
@@ -111,6 +111,8 @@ public void testIndicesExists() throws IOException {
         boolean response = execute(
             request,
             highLevelClient().indices()::exists,
+            highLevelClient().indices()::existsAsync,
+            highLevelClient().indices()::exists,
             highLevelClient().indices()::existsAsync
         );
         assertTrue(response);
@@ -126,6 +128,8 @@ public void testIndicesExists() throws IOException {
         boolean response = execute(
             request,
             highLevelClient().indices()::exists,
+            highLevelClient().indices()::existsAsync,
+            highLevelClient().indices()::exists,
             highLevelClient().indices()::existsAsync
         );
         assertFalse(response);
@@ -144,6 +148,8 @@ public void testIndicesExists() throws IOException {
         boolean response = execute(
             request,
             highLevelClient().indices()::exists,
+            highLevelClient().indices()::existsAsync,
+            highLevelClient().indices()::exists,
             highLevelClient().indices()::existsAsync
         );
         assertFalse(response);
@@ -161,7 +167,8 @@ public void testCreateIndex() throws IOException {
             CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
 
             CreateIndexResponse createIndexResponse =
-                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
+                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync,
+                            highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
             assertTrue(createIndexResponse.isAcknowledged());
 
             assertTrue(indexExists(indexName));
@@ -189,7 +196,8 @@ public void testCreateIndex() throws IOException {
             createIndexRequest.mapping("type_name", mappingBuilder);
 
             CreateIndexResponse createIndexResponse =
-                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
+                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync,
+                            highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
             assertTrue(createIndexResponse.isAcknowledged());
 
             Map<String, Object> getIndexResponse = getAsMap(indexName);
@@ -324,7 +332,8 @@ public void testPutMapping() throws IOException {
         putMappingRequest.source(mappingBuilder);
 
         PutMappingResponse putMappingResponse =
-                execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
+                execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync,
+                        highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
         assertTrue(putMappingResponse.isAcknowledged());
 
         Map<String, Object> getIndexResponse = getAsMap(indexName);
@@ -376,7 +385,8 @@ public void testDeleteIndex() throws IOException {
             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);
 
             DeleteIndexResponse deleteIndexResponse =
-                    execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
+                    execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync,
+                            highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
             assertTrue(deleteIndexResponse.isAcknowledged());
assertFalse(indexExists(indexName)); @@ -389,7 +399,8 @@ public void testDeleteIndex() throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); + () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync, + highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -408,6 +419,7 @@ public void testUpdateAliases() throws IOException { addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}"); aliasesAddRequest.addAliasAction(addAction); IndicesAliasesResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesAddResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(true)); @@ -426,6 +438,7 @@ public void testUpdateAliases() throws IOException { AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index(index).alias(alias); aliasesAddRemoveRequest.addAliasAction(removeAction); IndicesAliasesResponse aliasesAddRemoveResponse = execute(aliasesAddRemoveRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesAddRemoveResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(false)); @@ -437,6 +450,7 @@ public void testUpdateAliases() throws IOException { AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index(index); aliasesRemoveIndexRequest.addAliasAction(removeIndexAction); IndicesAliasesResponse aliasesRemoveIndexResponse = execute(aliasesRemoveIndexRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesRemoveIndexResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(false)); @@ -454,7 +468,9 @@ public void testAliasesNonExistentIndex() throws IOException { IndicesAliasesRequest nonExistentIndexRequest = new IndicesAliasesRequest(); nonExistentIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest, - highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync, + highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); @@ -464,7 +480,8 @@ public void testAliasesNonExistentIndex() throws IOException { mixedRequest.addAliasAction(new 
AliasActions(AliasActions.Type.ADD).indices(index).aliases(alias)); mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).indices(nonExistentIndex).alias(alias)); exception = expectThrows(ElasticsearchStatusException.class, - () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync, + highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); @@ -476,6 +493,7 @@ public void testAliasesNonExistentIndex() throws IOException { removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE_INDEX).indices(nonExistentIndex)); exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); @@ -496,6 +514,7 @@ public void testOpenExistingIndex() throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(index).waitForActiveShards(ActiveShardCount.ONE); OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync, highLevelClient().indices()::open, highLevelClient().indices()::openAsync); assertTrue(openIndexResponse.isAcknowledged()); assertTrue(openIndexResponse.isShardsAcknowledged()); @@ -510,19 +529,22 @@ public void testOpenNonExistentIndex() throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync, + highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync, highLevelClient().indices()::open, highLevelClient().indices()::openAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); + () -> 
execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync, + highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } @@ -534,6 +556,7 @@ public void testCloseExistingIndex() throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close, + highLevelClient().indices()::closeAsync, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync); assertTrue(closeIndexResponse.isAcknowledged()); @@ -549,7 +572,8 @@ public void testCloseNonExistentIndex() throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); + () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync, + highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } @@ -563,7 +587,8 @@ public void testRefresh() throws IOException { createIndex(index, settings); RefreshRequest refreshRequest = new RefreshRequest(index); RefreshResponse refreshResponse = - execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync); + execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync, + highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync); assertThat(refreshResponse.getTotalShards(), equalTo(1)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(1)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); @@ -574,7 +599,8 @@ public void testRefresh() throws IOException { assertFalse(indexExists(nonExistentIndex)); RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync)); + () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync, + highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -589,7 +615,8 @@ public void testFlush() throws IOException { createIndex(index, settings); FlushRequest flushRequest = new FlushRequest(index); FlushResponse flushResponse = - execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync); + execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync, + highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync); assertThat(flushResponse.getTotalShards(), equalTo(1)); assertThat(flushResponse.getSuccessfulShards(), equalTo(1)); assertThat(flushResponse.getFailedShards(), equalTo(0)); @@ -600,7 +627,8 @@ public void testFlush() throws IOException { assertFalse(indexExists(nonExistentIndex)); FlushRequest flushRequest = new FlushRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(flushRequest, highLevelClient().indices()::flush, 
highLevelClient().indices()::flushAsync)); + () -> execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync, + highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -648,7 +676,8 @@ public void testClearCache() throws IOException { createIndex(index, settings); ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(index); ClearIndicesCacheResponse clearCacheResponse = - execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync); + execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync, + highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync); assertThat(clearCacheResponse.getTotalShards(), equalTo(1)); assertThat(clearCacheResponse.getSuccessfulShards(), equalTo(1)); assertThat(clearCacheResponse.getFailedShards(), equalTo(0)); @@ -659,8 +688,8 @@ public void testClearCache() throws IOException { assertFalse(indexExists(nonExistentIndex)); ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(clearCacheRequest, highLevelClient().indices()::clearCache, - highLevelClient().indices()::clearCacheAsync)); + () -> execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync, + highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -675,7 +704,8 @@ public void testForceMerge() throws IOException { createIndex(index, settings); ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index); ForceMergeResponse forceMergeResponse = - execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); + execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync, + highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); assertThat(forceMergeResponse.getTotalShards(), equalTo(1)); assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1)); assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); @@ -686,25 +716,30 @@ public void testForceMerge() throws IOException { assertFalse(indexExists(nonExistentIndex)); ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); + () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync, + highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } public void testExistsAlias() throws IOException { GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); - assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, 
highLevelClient().indices()::existsAliasAsync)); createIndex("index", Settings.EMPTY); client().performRequest(HttpPut.METHOD_NAME, "/index/_alias/alias"); - assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); GetAliasesRequest getAliasesRequest2 = new GetAliasesRequest(); getAliasesRequest2.aliases("alias"); getAliasesRequest2.indices("index"); - assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); getAliasesRequest2.indices("does_not_exist"); - assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); } @SuppressWarnings("unchecked") @@ -719,7 +754,8 @@ public void testShrink() throws IOException { resizeRequest.setResizeType(ResizeType.SHRINK); Settings targetSettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build(); resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); - ResizeResponse resizeResponse = highLevelClient().indices().shrink(resizeRequest); + ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::shrink, + highLevelClient().indices()::shrinkAsync, highLevelClient().indices()::shrink, highLevelClient().indices()::shrinkAsync); assertTrue(resizeResponse.isAcknowledged()); assertTrue(resizeResponse.isShardsAcknowledged()); Map getIndexResponse = getAsMap("target"); @@ -741,7 +777,8 @@ public void testSplit() throws IOException { resizeRequest.setResizeType(ResizeType.SPLIT); Settings targetSettings = Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build(); resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); - ResizeResponse resizeResponse = highLevelClient().indices().split(resizeRequest); + ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::split, highLevelClient().indices()::splitAsync, + highLevelClient().indices()::split, highLevelClient().indices()::splitAsync); assertTrue(resizeResponse.isAcknowledged()); assertTrue(resizeResponse.isShardsAcknowledged()); Map getIndexResponse = getAsMap("target"); @@ -754,12 +791,13 @@ public void testSplit() throws IOException { } public void testRollover() throws IOException { - highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias"))); + highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias")), RequestOptions.DEFAULT); RolloverRequest rolloverRequest = new RolloverRequest("alias", "test_new"); rolloverRequest.addMaxIndexDocsCondition(1); { RolloverResponse rolloverResponse = execute(rolloverRequest, 
highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertFalse(rolloverResponse.isRolledOver()); assertFalse(rolloverResponse.isDryRun()); @@ -770,15 +808,16 @@ public void testRollover() throws IOException { assertEquals("test_new", rolloverResponse.getNewIndex()); } - highLevelClient().index(new IndexRequest("test", "type", "1").source("field", "value")); + highLevelClient().index(new IndexRequest("test", "type", "1").source("field", "value"), RequestOptions.DEFAULT); highLevelClient().index(new IndexRequest("test", "type", "2").source("field", "value") - .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)); + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL), RequestOptions.DEFAULT); //without the refresh the rollover may not happen as the number of docs seen may be off { rolloverRequest.addMaxIndexAgeCondition(new TimeValue(1)); rolloverRequest.dryRun(true); RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertFalse(rolloverResponse.isRolledOver()); assertTrue(rolloverResponse.isDryRun()); @@ -793,6 +832,7 @@ public void testRollover() throws IOException { rolloverRequest.dryRun(false); rolloverRequest.addMaxIndexSizeCondition(new ByteSizeValue(1, ByteSizeUnit.MB)); RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertTrue(rolloverResponse.isRolledOver()); assertFalse(rolloverResponse.isDryRun()); @@ -827,6 +867,7 @@ public void testIndexPutSettings() throws IOException { UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); UpdateSettingsResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); assertTrue(response.isAcknowledged()); @@ -837,6 +878,7 @@ public void testIndexPutSettings() throws IOException { UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.getMessage(), startsWith("Elasticsearch exception [type=illegal_argument_exception, " @@ -847,6 +889,7 @@ public void testIndexPutSettings() throws IOException { closeIndex(index); response = execute(staticSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); assertTrue(response.isAcknowledged()); openIndex(index); @@ -857,6 +900,7 @@ public void testIndexPutSettings() throws IOException { UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); 
unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.getMessage(), startsWith( "Elasticsearch exception [type=illegal_argument_exception, " @@ -884,12 +928,14 @@ public void testIndexPutSettingNonExistent() throws IOException { indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); createIndex(index, Settings.EMPTY); exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java index b4d8828eb7e6f..057ea49f9a969 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java @@ -28,12 +28,12 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase { public void testPing() throws IOException { - assertTrue(highLevelClient().ping()); + assertTrue(highLevelClient().ping(RequestOptions.DEFAULT)); } @SuppressWarnings("unchecked") public void testInfo() throws IOException { - MainResponse info = highLevelClient().info(); + MainResponse info = highLevelClient().info(RequestOptions.DEFAULT); // compare with what the low level client outputs Map infoAsMap = entityAsMap(adminClient().performRequest(HttpGet.METHOD_NAME, "/")); assertEquals(infoAsMap.get("cluster_name"), info.getClusterName().value()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java index 9497bdded0549..1e12f3f5e62f6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java @@ -82,8 +82,8 @@ public void testRankEvalRequest() throws IOException { RankEvalSpec spec = new RankEvalSpec(specifications, metric); RankEvalRequest rankEvalRequest = new RankEvalRequest(spec, new String[] { "index", "index2" }); - RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, - highLevelClient()::rankEvalAsync); + RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync, + highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); // the expected 
Prec@ for the first query is 5/7 and the expected Prec@ for the second is 1/7, divided by 2 to get the average double expectedPrecision = (1.0 / 7.0 + 5.0 / 7.0) / 2.0; assertEquals(expectedPrecision, response.getEvaluationResult(), Double.MIN_VALUE); @@ -117,7 +117,8 @@ public void testRankEvalRequest() throws IOException { // now try this when test2 is closed client().performRequest("POST", "index2/_close", Collections.emptyMap()); rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); - response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); + response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync, + highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); } private static List createRelevant(String indexName, String... docs) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b6a650819d370..307dd0afb5e07 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -27,10 +27,7 @@ import org.apache.http.ProtocolVersion; import org.apache.http.RequestLine; import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -77,9 +74,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.internal.matchers.ArrayEquals; -import org.mockito.internal.matchers.VarargMatcher; import java.io.IOException; import java.net.SocketTimeoutException; @@ -124,25 +118,22 @@ public void testCloseIsIdempotent() throws IOException { } public void testPingSuccessful() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK)); when(restClient.performRequest(any(Request.class))).thenReturn(response); - assertTrue(restHighLevelClient.ping(headers)); + assertTrue(restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testPing404NotFound() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND)); when(restClient.performRequest(any(Request.class))).thenReturn(response); - assertFalse(restHighLevelClient.ping(headers)); + assertFalse(restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testPingSocketTimeout() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); when(restClient.performRequest(any(Request.class))).thenThrow(new SocketTimeoutException()); - expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers)); + expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testInfo() throws IOException { @@ -150,18 +141,17 @@ public void 
testInfo() throws IOException { MainResponse testInfo = new MainResponse("nodeName", Version.CURRENT, new ClusterName("clusterName"), "clusterUuid", Build.CURRENT, true); mockResponse(testInfo); - MainResponse receivedInfo = restHighLevelClient.info(headers); + MainResponse receivedInfo = restHighLevelClient.info(RequestOptions.DEFAULT); assertEquals(testInfo, receivedInfo); } public void testSearchScroll() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); SearchResponse mockSearchResponse = new SearchResponse(new SearchResponseSections(SearchHits.empty(), InternalAggregations.EMPTY, null, false, false, null, 1), randomAlphaOfLengthBetween(5, 10), 5, 5, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); mockResponse(mockSearchResponse); - SearchResponse searchResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)), - headers); + SearchResponse searchResponse = restHighLevelClient.searchScroll( + new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)), RequestOptions.DEFAULT); assertEquals(mockSearchResponse.getScrollId(), searchResponse.getScrollId()); assertEquals(0, searchResponse.getHits().totalHits); assertEquals(5, searchResponse.getTotalShards()); @@ -170,12 +160,11 @@ public void testSearchScroll() throws IOException { } public void testClearScroll() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); ClearScrollResponse mockClearScrollResponse = new ClearScrollResponse(randomBoolean(), randomIntBetween(0, Integer.MAX_VALUE)); mockResponse(mockClearScrollResponse); ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10)); - ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers); + ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded()); assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index f564425f0c8a9..6848401c05edf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -164,7 +164,8 @@ public void testSearchNoQuery() throws IOException { public void testSearchMatchQuery() throws IOException { SearchRequest searchRequest = new SearchRequest("index"); searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getAggregations()); assertNull(searchResponse.getSuggest()); @@ -190,7 +191,8 @@ public void testSearchWithTermsAgg() throws IOException { searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword")); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, 
highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -216,7 +218,8 @@ public void testSearchWithRangeAgg() throws IOException { searchRequest.source(searchSourceBuilder); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, - () -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync)); + () -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync)); assertEquals(RestStatus.BAD_REQUEST, exception.status()); } @@ -226,7 +229,8 @@ public void testSearchWithRangeAgg() throws IOException { .addRange("first", 0, 30).addRange("second", 31, 200)); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -257,7 +261,8 @@ public void testSearchWithTermsAndRangeAgg() throws IOException { searchSourceBuilder.aggregation(agg); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -308,7 +313,8 @@ public void testSearchWithMatrixStats() throws IOException { searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2"))); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -397,7 +403,8 @@ public void testSearchWithParentJoin() throws IOException { SearchRequest searchRequest = new SearchRequest(indexName); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -437,7 
+444,8 @@ public void testSearchWithSuggest() throws IOException { searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getAggregations()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -469,7 +477,8 @@ public void testSearchWithWeirdScriptFields() throws Exception { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("null"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -479,7 +488,8 @@ public void testSearchWithWeirdScriptFields() throws Exception { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("new HashMap()"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -491,7 +501,8 @@ public void testSearchWithWeirdScriptFields() throws Exception { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("new String[]{}"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -513,7 +524,8 @@ public void testSearchScroll() throws Exception { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC); SearchRequest searchRequest = new SearchRequest("test").scroll(TimeValue.timeValueMinutes(2)).source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); try { long counter = 0; @@ -525,6 +537,7 @@ public void testSearchScroll() throws Exception { } searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)), + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, 
highLevelClient()::searchScrollAsync); assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L)); @@ -534,6 +547,7 @@ public void testSearchScroll() throws Exception { } searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)), + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync); assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L)); @@ -545,14 +559,14 @@ public void testSearchScroll() throws Exception { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(searchResponse.getScrollId()); ClearScrollResponse clearScrollResponse = execute(clearScrollRequest, - // Not using a method reference to work around https://bugs.eclipse.org/bugs/show_bug.cgi?id=517951 - (request, headers) -> highLevelClient().clearScroll(request, headers), - (request, listener, headers) -> highLevelClient().clearScrollAsync(request, listener, headers)); + highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync, + highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync); assertThat(clearScrollResponse.getNumFreed(), greaterThan(0)); assertTrue(clearScrollResponse.isSucceeded()); SearchScrollRequest scrollRequest = new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> execute(scrollRequest, + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertThat(exception.getRootCause(), instanceOf(ElasticsearchException.class)); @@ -574,7 +588,8 @@ public void testMultiSearch() throws Exception { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue()); @@ -615,7 +630,8 @@ public void testMultiSearch_withAgg() throws Exception { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue()); @@ -662,7 +678,8 @@ public void testMultiSearch_withQuery() throws Exception { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); 
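The scroll and multi-search changes above follow the same mechanical pattern; for the scroll lifecycle in particular, the round trip with the new signatures looks roughly like this. A sketch assuming an open RestHighLevelClient named client and a populated "test" index (page size and sort field are illustrative):

    // Initial search that opens a scroll context.
    SearchRequest searchRequest = new SearchRequest("test")
            .scroll(TimeValue.timeValueMinutes(2))
            .source(new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC));
    SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);

    // Fetch the next page using the scroll id returned by the previous call.
    SearchScrollRequest scrollRequest =
            new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2));
    searchResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT);

    // Release the server-side scroll context once done.
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.addScrollId(searchResponse.getScrollId());
    ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT);

As the test asserts, scrolling with an id that has already been cleared then fails with NOT_FOUND.
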
            assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
@@ -723,7 +740,8 @@ public void testMultiSearch_failure() throws Exception {
         multiSearchRequest.add(searchRequest2);
 
         MultiSearchResponse multiSearchResponse =
-                execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+                execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync,
+                        highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
         assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2));
 
         assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(true));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
index de7fdf3a2a23d..f4b8636d72d78 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
@@ -48,6 +48,7 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.common.Strings;
@@ -72,13 +73,12 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
+import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.arrayWithSize;
-import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.not;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonMap;
 
 /**
  * This class is used to generate the Java CRUD API documentation.
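The documentation snippets below pass RequestOptions.DEFAULT throughout; what replaces the removed Header... varargs is the options builder. A hedged sketch, assuming the RequestOptions builder API of the contemporary low-level client (the "Authorization" header and token value are made-up, illustrative only):

    // Build customized options once and reuse them across requests.
    RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
    builder.addHeader("Authorization", "Bearer some-token"); // assumed header, illustrative only
    RequestOptions commonOptions = builder.build();

    IndexRequest request = new IndexRequest("posts", "doc", "1").source("field", "value");
    IndexResponse response = client.index(request, commonOptions);
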
@@ -112,7 +112,7 @@ public void testIndex() throws Exception { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1") .source(jsonMap); // <1> //end::index-request-map - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); } { @@ -128,7 +128,7 @@ public void testIndex() throws Exception { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1") .source(builder); // <1> //end::index-request-xcontent - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -138,7 +138,7 @@ public void testIndex() throws Exception { "postDate", new Date(), "message", "trying out Elasticsearch"); // <1> //end::index-request-shortcut - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -156,7 +156,7 @@ public void testIndex() throws Exception { //end::index-request-string // tag::index-execute - IndexResponse indexResponse = client.index(request); + IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); // end::index-execute assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); @@ -217,7 +217,7 @@ public void testIndex() throws Exception { .source("field", "value") .version(1); try { - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -231,7 +231,7 @@ public void testIndex() throws Exception { .source("field", "value") .opType(DocWriteRequest.OpType.CREATE); try { - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -260,7 +260,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::index-execute-async - client.indexAsync(request, listener); // <1> + client.indexAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -271,7 +271,7 @@ public void testUpdate() throws Exception { RestHighLevelClient client = highLevelClient(); { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); Request request = new Request("POST", "/_scripts/increment-field"); @@ -300,7 +300,7 @@ public void testUpdate() throws Exception { "ctx._source.field += params.count", parameters); // <2> request.script(inline); // <3> //end::update-request-with-inline-script - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertEquals(4, updateResponse.getGetResult().getSource().get("field")); @@ -310,7 +310,7 @@ public void testUpdate() throws Exception { new 
Script(ScriptType.STORED, null, "increment-field", parameters); // <1> request.script(stored); // <2> //end::update-request-with-stored-script - updateResponse = client.update(request); + updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertEquals(8, updateResponse.getGetResult().getSource().get("field")); } @@ -322,7 +322,7 @@ public void testUpdate() throws Exception { UpdateRequest request = new UpdateRequest("posts", "doc", "1") .doc(jsonMap); // <1> //end::update-request-with-doc-as-map - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -337,7 +337,7 @@ public void testUpdate() throws Exception { UpdateRequest request = new UpdateRequest("posts", "doc", "1") .doc(builder); // <1> //end::update-request-with-doc-as-xcontent - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -346,7 +346,7 @@ public void testUpdate() throws Exception { .doc("updated", new Date(), "reason", "daily update"); // <1> //end::update-request-shortcut - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -360,7 +360,7 @@ public void testUpdate() throws Exception { //end::update-request-with-doc-as-string request.fetchSource(true); // tag::update-execute - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); // end::update-execute assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); @@ -409,7 +409,7 @@ public void testUpdate() throws Exception { UpdateRequest request = new UpdateRequest("posts", "type", "does_not_exist") .doc("field", "value"); try { - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); } catch (ElasticsearchException e) { if (e.status() == RestStatus.NOT_FOUND) { // <1> @@ -423,7 +423,7 @@ public void testUpdate() throws Exception { .doc("field", "value") .version(1); try { - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -436,7 +436,7 @@ public void testUpdate() throws Exception { //tag::update-request-no-source request.fetchSource(true); // <1> //end::update-request-no-source - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertNotNull(updateResponse.getGetResult()); assertEquals(3, updateResponse.getGetResult().sourceAsMap().size()); @@ -448,7 +448,7 @@ public void testUpdate() throws Exception { String[] excludes = Strings.EMPTY_ARRAY; request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-include - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, 
RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); Map sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); @@ -462,7 +462,7 @@ public void testUpdate() throws Exception { String[] excludes = new String[]{"updated"}; request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-exclude - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); Map sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); @@ -531,7 +531,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::update-execute-async - client.updateAsync(request, listener); // <1> + client.updateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::update-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -543,7 +543,7 @@ public void testDelete() throws Exception { { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", "value"); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); } @@ -556,7 +556,7 @@ public void testDelete() throws Exception { // end::delete-request // tag::delete-execute - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); // end::delete-execute assertSame(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); @@ -604,7 +604,7 @@ public void testDelete() throws Exception { { // tag::delete-notfound DeleteRequest request = new DeleteRequest("posts", "doc", "does_not_exist"); - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { // <1> } @@ -612,13 +612,14 @@ public void testDelete() throws Exception { } { - IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value")); + IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value") + , RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); // tag::delete-conflict try { DeleteRequest request = new DeleteRequest("posts", "doc", "1").version(2); - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.CONFLICT) { // <1> @@ -627,7 +628,8 @@ public void testDelete() throws Exception { // end::delete-conflict } { - IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value")); + IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value"), + RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); DeleteRequest request = new DeleteRequest("posts", "doc", "async"); @@ -651,7 +653,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::delete-execute-async - client.deleteAsync(request, listener); // <1> + 
client.deleteAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -671,7 +673,7 @@ public void testBulk() throws Exception { .source(XContentType.JSON,"field", "baz")); // end::bulk-request // tag::bulk-execute - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); // end::bulk-execute assertSame(bulkResponse.status(), RestStatus.OK); assertFalse(bulkResponse.hasFailures()); @@ -685,7 +687,7 @@ public void testBulk() throws Exception { request.add(new IndexRequest("posts", "doc", "4") // <3> .source(XContentType.JSON,"field", "baz")); // end::bulk-request-with-mixed-operations - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(bulkResponse.status(), RestStatus.OK); assertFalse(bulkResponse.hasFailures()); @@ -784,7 +786,7 @@ public void testGet() throws Exception { .source("user", "kimchy", "postDate", new Date(), "message", "trying out Elasticsearch"); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); } { @@ -796,7 +798,7 @@ public void testGet() throws Exception { //end::get-request //tag::get-execute - GetResponse getResponse = client.get(getRequest); + GetResponse getResponse = client.get(getRequest, RequestOptions.DEFAULT); //end::get-execute assertTrue(getResponse.isExists()); assertEquals(3, getResponse.getSourceAsMap().size()); @@ -819,7 +821,7 @@ public void testGet() throws Exception { //tag::get-request-no-source request.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); // <1> //end::get-request-no-source - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); assertNull(getResponse.getSourceInternal()); } { @@ -831,7 +833,7 @@ public void testGet() throws Exception { new FetchSourceContext(true, includes, excludes); request.fetchSourceContext(fetchSourceContext); // <1> //end::get-request-source-include - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); Map sourceAsMap = getResponse.getSourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("trying out Elasticsearch", sourceAsMap.get("message")); @@ -846,7 +848,7 @@ public void testGet() throws Exception { new FetchSourceContext(true, includes, excludes); request.fetchSourceContext(fetchSourceContext); // <1> //end::get-request-source-exclude - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); Map sourceAsMap = getResponse.getSourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("kimchy", sourceAsMap.get("user")); @@ -856,7 +858,7 @@ public void testGet() throws Exception { GetRequest request = new GetRequest("posts", "doc", "1"); //tag::get-request-stored request.storedFields("message"); // <1> - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); String message = getResponse.getField("message").getValue(); // <2> //end::get-request-stored assertEquals("trying out Elasticsearch", message); @@ -909,7 +911,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); 
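The asynchronous variants change the same way: RequestOptions slots in before the ActionListener, which stays last. A sketch of that pattern under the same assumptions (client construction omitted; index and id are illustrative):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.get.GetRequest;
    import org.elasticsearch.action.get.GetResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;

    public class GetAsyncExample {
        // Waits up to 30s so the caller does not move on before the callback fires,
        // mirroring the LatchedActionListener pattern used throughout these tests.
        static void getAsync(RestHighLevelClient client) throws InterruptedException {
            GetRequest request = new GetRequest("posts", "doc", "1");
            CountDownLatch latch = new CountDownLatch(1);
            client.getAsync(request, RequestOptions.DEFAULT, new ActionListener<GetResponse>() {
                @Override
                public void onResponse(GetResponse response) {
                    latch.countDown(); // success path
                }

                @Override
                public void onFailure(Exception e) {
                    latch.countDown(); // failure path
                }
            });
            latch.await(30L, TimeUnit.SECONDS);
        }
    }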
//tag::get-execute-async - client.getAsync(request, listener); // <1> + client.getAsync(request, RequestOptions.DEFAULT, listener); // <1> //end::get-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -918,7 +920,7 @@ public void onFailure(Exception e) { //tag::get-indexnotfound GetRequest request = new GetRequest("does_not_exist", "doc", "1"); try { - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); } catch (ElasticsearchException e) { if (e.status() == RestStatus.NOT_FOUND) { // <1> @@ -930,7 +932,7 @@ public void onFailure(Exception e) { // tag::get-conflict try { GetRequest request = new GetRequest("posts", "doc", "1").version(2); - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.CONFLICT) { // <1> @@ -952,7 +954,7 @@ public void testExists() throws Exception { // end::exists-request { // tag::exists-execute - boolean exists = client.exists(getRequest); + boolean exists = client.exists(getRequest, RequestOptions.DEFAULT); // end::exists-execute assertFalse(exists); } @@ -976,7 +978,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::exists-execute-async - client.existsAsync(getRequest, listener); // <1> + client.existsAsync(getRequest, RequestOptions.DEFAULT, listener); // <1> // end::exists-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1103,7 +1105,7 @@ public void testMultiGet() throws Exception { source.put("baz", "val3"); client.index(new IndexRequest("index", "type", "example_id") .source(source) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { // tag::multi-get-request @@ -1134,7 +1136,7 @@ public void testMultiGet() throws Exception { // end::multi-get-request-top-level-extras // tag::multi-get-execute - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); // end::multi-get-execute // tag::multi-get-response @@ -1198,7 +1200,7 @@ public void onFailure(Exception e) { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)); // <1> // end::multi-get-request-no-source - MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertNull(item.getResponse().getSource()); } { @@ -1211,7 +1213,7 @@ public void onFailure(Exception e) { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(fetchSourceContext)); // <1> // end::multi-get-request-source-include - MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertThat(item.getResponse().getSource(), hasEntry("foo", "val1")); assertThat(item.getResponse().getSource(), hasEntry("bar", "val2")); assertThat(item.getResponse().getSource(), not(hasKey("baz"))); @@ -1226,7 +1228,7 @@ public void onFailure(Exception e) { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(fetchSourceContext)); // <1> // end::multi-get-request-source-exclude - MultiGetItemResponse item = 
unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertThat(item.getResponse().getSource(), not(hasKey("foo"))); assertThat(item.getResponse().getSource(), not(hasKey("bar"))); assertThat(item.getResponse().getSource(), hasEntry("baz", "val3")); @@ -1236,7 +1238,7 @@ public void onFailure(Exception e) { // tag::multi-get-request-stored request.add(new MultiGetRequest.Item("index", "type", "example_id") .storedFields("foo")); // <1> - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); MultiGetItemResponse item = response.getResponses()[0]; String value = item.getResponse().getField("foo").getValue(); // <2> // end::multi-get-request-stored @@ -1248,7 +1250,7 @@ public void onFailure(Exception e) { MultiGetRequest request = new MultiGetRequest(); request.add(new MultiGetRequest.Item("index", "type", "example_id") .version(1000L)); - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); MultiGetItemResponse item = response.getResponses()[0]; assertNull(item.getResponse()); // <1> Exception e = item.getFailure().getFailure(); // <2> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 304c5010a47e3..e8dd4025ba94e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -23,27 +23,19 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.ingest.PipelineConfiguration; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; -import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -134,7 +126,7 @@ public void testClusterPutSettings() throws IOException { // end::put-settings-request-masterTimeout // tag::put-settings-execute - ClusterUpdateSettingsResponse response = client.cluster().putSettings(request); + ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, 
RequestOptions.DEFAULT); // end::put-settings-execute // tag::put-settings-response @@ -150,7 +142,7 @@ public void testClusterPutSettings() throws IOException { request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1> // tag::put-settings-request-reset-transient request.persistentSettings(Settings.builder().putNull(persistentSettingKey)); - ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request); + ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, RequestOptions.DEFAULT); assertTrue(resetResponse.isAcknowledged()); } @@ -180,7 +172,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::put-settings-execute-async - client.cluster().putSettingsAsync(request, listener); // <1> + client.cluster().putSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index afae4a5a7a0fe..47c5c976fcc27 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -64,6 +64,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -111,7 +112,7 @@ public void testIndicesExist() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -130,7 +131,7 @@ public void testIndicesExist() throws IOException { // end::indices-exists-request-optionals // tag::indices-exists-response - boolean exists = client.indices().exists(request); + boolean exists = client.indices().exists(request, RequestOptions.DEFAULT); // end::indices-exists-response assertTrue(exists); } @@ -140,7 +141,7 @@ public void testIndicesExistAsync() throws Exception { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -167,7 +168,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::indices-exists-async - client.indices().existsAsync(request, listener); // <1> + client.indices().existsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::indices-exists-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -177,7 +178,7 @@ public void testDeleteIndex() throws IOException { RestHighLevelClient client = 
highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -199,7 +200,7 @@ public void testDeleteIndex() throws IOException { // end::delete-index-request-indicesOptions // tag::delete-index-execute - DeleteIndexResponse deleteIndexResponse = client.indices().delete(request); + DeleteIndexResponse deleteIndexResponse = client.indices().delete(request, RequestOptions.DEFAULT); // end::delete-index-execute // tag::delete-index-response @@ -212,7 +213,7 @@ public void testDeleteIndex() throws IOException { // tag::delete-index-notfound try { DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist"); - client.indices().delete(request); + client.indices().delete(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -226,7 +227,7 @@ public void testDeleteIndexAsync() throws Exception { final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -253,7 +254,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::delete-index-execute-async - client.indices().deleteAsync(request, listener); // <1> + client.indices().deleteAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -289,7 +290,7 @@ public void testCreateIndex() throws IOException { "}", // <2> XContentType.JSON); // end::create-index-request-mappings - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -306,7 +307,7 @@ public void testCreateIndex() throws IOException { jsonMap.put("tweet", tweet); request.mapping("tweet", jsonMap); // <1> //end::create-index-mappings-map - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } { @@ -332,7 +333,7 @@ public void testCreateIndex() throws IOException { builder.endObject(); request.mapping("tweet", builder); // <1> //end::create-index-mappings-xcontent - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } { @@ -340,7 +341,7 @@ public void testCreateIndex() throws IOException { //tag::create-index-mappings-shortcut request.mapping("tweet", "message", "type=text"); // <1> //end::create-index-mappings-shortcut - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -362,7 +363,7 @@ public void testCreateIndex() throws IOException { 
request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> // end::create-index-request-waitForActiveShards { - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -387,7 +388,7 @@ public void testCreateIndex() throws IOException { // end::create-index-whole-source // tag::create-index-execute - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); // end::create-index-execute // tag::create-index-response @@ -426,7 +427,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::create-index-execute-async - client.indices().createAsync(request, listener); // <1> + client.indices().createAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::create-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -437,7 +438,7 @@ public void testPutMapping() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -459,7 +460,7 @@ public void testPutMapping() throws IOException { "}", // <1> XContentType.JSON); // end::put-mapping-request-source - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } @@ -473,7 +474,7 @@ public void testPutMapping() throws IOException { jsonMap.put("properties", properties); request.source(jsonMap); // <1> //end::put-mapping-map - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } { @@ -494,14 +495,14 @@ public void testPutMapping() throws IOException { builder.endObject(); request.source(builder); // <1> //end::put-mapping-xcontent - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } { //tag::put-mapping-shortcut request.source("message", "type=text"); // <1> //end::put-mapping-shortcut - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } @@ -515,7 +516,7 @@ public void testPutMapping() throws IOException { // end::put-mapping-request-masterTimeout // tag::put-mapping-execute - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); // end::put-mapping-execute // tag::put-mapping-response @@ -529,7 +530,7 @@ public void testPutMappingAsync() throws Exception { final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = 
client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -556,7 +557,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::put-mapping-execute-async - client.indices().putMappingAsync(request, listener); // <1> + client.indices().putMappingAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-mapping-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -601,7 +602,7 @@ public void testGetMapping() throws IOException { // end::get-mapping-request-indicesOptions // tag::get-mapping-execute - GetMappingsResponse getMappingResponse = client.indices().getMappings(request); + GetMappingsResponse getMappingResponse = client.indices().getMappings(request, RequestOptions.DEFAULT); // end::get-mapping-execute // tag::get-mapping-response @@ -683,7 +684,7 @@ public void onFailure(Exception e) { }); // tag::get-mapping-execute-async - client.indices().getMappingsAsync(request, listener); // <1> + client.indices().getMappingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-mapping-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -694,7 +695,7 @@ public void testOpenIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -721,7 +722,7 @@ public void testOpenIndex() throws Exception { // end::open-index-request-indicesOptions // tag::open-index-execute - OpenIndexResponse openIndexResponse = client.indices().open(request); + OpenIndexResponse openIndexResponse = client.indices().open(request, RequestOptions.DEFAULT); // end::open-index-execute // tag::open-index-response @@ -751,7 +752,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::open-index-execute-async - client.indices().openAsync(request, listener); // <1> + client.indices().openAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::open-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -761,7 +762,7 @@ public void onFailure(Exception e) { // tag::open-index-notfound try { OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); - client.indices().open(request); + client.indices().open(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.BAD_REQUEST) { // <1> @@ -790,7 +791,7 @@ public void testRefreshIndex() throws Exception { // end::refresh-request-indicesOptions // tag::refresh-execute - RefreshResponse refreshResponse = client.indices().refresh(request); + RefreshResponse refreshResponse = client.indices().refresh(request, RequestOptions.DEFAULT); // end::refresh-execute // tag::refresh-response @@ -819,7 +820,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::refresh-execute-async - client.indices().refreshAsync(request, listener); // <1> + client.indices().refreshAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::refresh-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -829,7 +830,7 @@ public void 
onFailure(Exception e) { // tag::refresh-notfound try { RefreshRequest request = new RefreshRequest("does_not_exist"); - client.indices().refresh(request); + client.indices().refresh(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -866,7 +867,7 @@ public void testFlushIndex() throws Exception { // end::flush-request-force // tag::flush-execute - FlushResponse flushResponse = client.indices().flush(request); + FlushResponse flushResponse = client.indices().flush(request, RequestOptions.DEFAULT); // end::flush-execute // tag::flush-response @@ -895,7 +896,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::flush-execute-async - client.indices().flushAsync(request, listener); // <1> + client.indices().flushAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::flush-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -905,7 +906,7 @@ public void onFailure(Exception e) { // tag::flush-notfound try { FlushRequest request = new FlushRequest("does_not_exist"); - client.indices().flush(request); + client.indices().flush(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -934,7 +935,7 @@ public void testSyncedFlushIndex() throws Exception { // end::flush-synced-request-indicesOptions // tag::flush-synced-execute - SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request); + SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, RequestOptions.DEFAULT); // end::flush-synced-execute // tag::flush-synced-response @@ -978,7 +979,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::flush-synced-execute-async - client.indices().flushSyncedAsync(request, listener); // <1> + client.indices().flushSyncedAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::flush-synced-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -988,7 +989,7 @@ public void onFailure(Exception e) { // tag::flush-synced-notfound try { SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); - client.indices().flushSynced(request); + client.indices().flushSynced(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1003,7 +1004,8 @@ public void testGetSettings() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index", settings)); + CreateIndexResponse createIndexResponse = client.indices().create( + new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1020,7 +1022,7 @@ public void testGetSettings() throws Exception { // end::get-settings-request-indicesOptions // tag::get-settings-execute - GetSettingsResponse getSettingsResponse = client.indices().getSettings(request); + GetSettingsResponse getSettingsResponse = client.indices().getSettings(request, RequestOptions.DEFAULT); // end::get-settings-execute // tag::get-settings-response @@ -1055,7 +1057,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::get-settings-execute-async - client.indices().getSettingsAsync(request, listener); // <1> + 
client.indices().getSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1066,7 +1068,8 @@ public void testGetSettingsWithDefaults() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index", settings)); + CreateIndexResponse createIndexResponse = client.indices().create( + new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1077,7 +1080,7 @@ public void testGetSettingsWithDefaults() throws Exception { request.includeDefaults(true); // <1> // end::get-settings-request-include-defaults - GetSettingsResponse getSettingsResponse = client.indices().getSettings(request); + GetSettingsResponse getSettingsResponse = client.indices().getSettings(request, RequestOptions.DEFAULT); String numberOfShardsString = getSettingsResponse.getSetting("index", "index.number_of_shards"); Settings indexSettings = getSettingsResponse.getIndexToSettings().get("index"); Integer numberOfShards = indexSettings.getAsInt("index.number_of_shards", null); @@ -1107,7 +1110,7 @@ public void onFailure(Exception e) { final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - client.indices().getSettingsAsync(request, listener); + client.indices().getSettingsAsync(request, RequestOptions.DEFAULT, listener); assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -1142,7 +1145,7 @@ public void testForceMergeIndex() throws Exception { // end::force-merge-request-flush // tag::force-merge-execute - ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request); + ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request, RequestOptions.DEFAULT); // end::force-merge-execute // tag::force-merge-response @@ -1167,14 +1170,14 @@ public void onFailure(Exception e) { // end::force-merge-execute-listener // tag::force-merge-execute-async - client.indices().forceMergeAsync(request, listener); // <1> + client.indices().forceMergeAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::force-merge-execute-async } { // tag::force-merge-notfound try { ForceMergeRequest request = new ForceMergeRequest("does_not_exist"); - client.indices().forceMerge(request); + client.indices().forceMerge(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1219,7 +1222,7 @@ public void testClearCache() throws Exception { // end::clear-cache-request-fields // tag::clear-cache-execute - ClearIndicesCacheResponse clearCacheResponse = client.indices().clearCache(request); + ClearIndicesCacheResponse clearCacheResponse = client.indices().clearCache(request, RequestOptions.DEFAULT); // end::clear-cache-execute // tag::clear-cache-response @@ -1248,7 +1251,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::clear-cache-execute-async - client.indices().clearCacheAsync(request, listener); // <1> + client.indices().clearCacheAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::clear-cache-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1258,7 +1261,7 @@ public void onFailure(Exception e) { // tag::clear-cache-notfound try { ClearIndicesCacheRequest request = new ClearIndicesCacheRequest("does_not_exist"); - 
client.indices().clearCache(request); + client.indices().clearCache(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1272,7 +1275,7 @@ public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1295,7 +1298,7 @@ public void testCloseIndex() throws Exception { // end::close-index-request-indicesOptions // tag::close-index-execute - CloseIndexResponse closeIndexResponse = client.indices().close(request); + CloseIndexResponse closeIndexResponse = client.indices().close(request, RequestOptions.DEFAULT); // end::close-index-execute // tag::close-index-response @@ -1323,7 +1326,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::close-index-execute-async - client.indices().closeAsync(request, listener); // <1> + client.indices().closeAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::close-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1335,7 +1338,7 @@ public void testExistsAlias() throws Exception { { CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index") - .alias(new Alias("alias"))); + .alias(new Alias("alias")), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1363,7 +1366,7 @@ public void testExistsAlias() throws Exception { // end::exists-alias-request-local // tag::exists-alias-execute - boolean exists = client.indices().existsAlias(request); + boolean exists = client.indices().existsAlias(request, RequestOptions.DEFAULT); // end::exists-alias-execute assertTrue(exists); @@ -1386,7 +1389,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::exists-alias-execute-async - client.indices().existsAliasAsync(request, listener); // <1> + client.indices().existsAliasAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::exists-alias-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1397,13 +1400,13 @@ public void testUpdateAliases() throws Exception { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index2")); + createIndexResponse = client.indices().create(new CreateIndexRequest("index2"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index3")); + createIndexResponse = client.indices().create(new CreateIndexRequest("index3"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index4")); + createIndexResponse = client.indices().create(new CreateIndexRequest("index4"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1448,7 +1451,7 @@ public void testUpdateAliases() throws 
Exception { // tag::update-aliases-execute IndicesAliasesResponse indicesAliasesResponse = - client.indices().updateAliases(request); + client.indices().updateAliases(request, RequestOptions.DEFAULT); // end::update-aliases-execute // tag::update-aliases-response @@ -1482,7 +1485,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::update-aliases-execute-async - client.indices().updateAliasesAsync(request, listener); // <1> + client.indices().updateAliasesAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::update-aliases-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1526,7 +1529,7 @@ public void testShrinkIndex() throws Exception { // end::shrink-index-request-aliases // tag::shrink-index-execute - ResizeResponse resizeResponse = client.indices().shrink(request); + ResizeResponse resizeResponse = client.indices().shrink(request, RequestOptions.DEFAULT); // end::shrink-index-execute // tag::shrink-index-response @@ -1555,7 +1558,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::shrink-index-execute-async - client.indices().shrinkAsync(request, listener); // <1> + client.indices().shrinkAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::shrink-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1596,7 +1599,7 @@ public void testSplitIndex() throws Exception { // end::split-index-request-aliases // tag::split-index-execute - ResizeResponse resizeResponse = client.indices().split(request); + ResizeResponse resizeResponse = client.indices().split(request, RequestOptions.DEFAULT); // end::split-index-execute // tag::split-index-response @@ -1625,7 +1628,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::split-index-execute-async - client.indices().splitAsync(request,listener); // <1> + client.indices().splitAsync(request, RequestOptions.DEFAULT,listener); // <1> // end::split-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1635,7 +1638,7 @@ public void testRolloverIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { - client.indices().create(new CreateIndexRequest("index-1").alias(new Alias("alias"))); + client.indices().create(new CreateIndexRequest("index-1").alias(new Alias("alias")), RequestOptions.DEFAULT); } // tag::rollover-request @@ -1672,7 +1675,7 @@ public void testRolloverIndex() throws Exception { // end::rollover-request-alias // tag::rollover-execute - RolloverResponse rolloverResponse = client.indices().rollover(request); + RolloverResponse rolloverResponse = client.indices().rollover(request, RequestOptions.DEFAULT); // end::rollover-execute // tag::rollover-response @@ -1711,7 +1714,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::rollover-execute-async - client.indices().rolloverAsync(request,listener); // <1> + client.indices().rolloverAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::rollover-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1721,7 +1724,7 @@ public void testIndexPutSettings() throws Exception { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); 
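The namespaced admin clients (client.indices(), client.cluster(), client.ingest()) follow the same rule. As a sketch, updating index settings with explicit options (the index name and replica count are hypothetical):

    import java.io.IOException;
    import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
    import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.common.settings.Settings;

    public class PutSettingsExample {
        // Raises the replica count on a hypothetical "index".
        static boolean bumpReplicas(RestHighLevelClient client) throws IOException {
            UpdateSettingsRequest request = new UpdateSettingsRequest("index")
                    .settings(Settings.builder().put("index.number_of_replicas", 2));
            // The indices client mirrors the top-level client: options are explicit.
            UpdateSettingsResponse response =
                    client.indices().putSettings(request, RequestOptions.DEFAULT);
            return response.isAcknowledged();
        }
    }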
assertTrue(createIndexResponse.isAcknowledged()); } @@ -1784,7 +1787,7 @@ public void testIndexPutSettings() throws Exception { // tag::put-settings-execute UpdateSettingsResponse updateSettingsResponse = - client.indices().putSettings(request); + client.indices().putSettings(request, RequestOptions.DEFAULT); // end::put-settings-execute // tag::put-settings-response @@ -1813,7 +1816,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::put-settings-execute-async - client.indices().putSettingsAsync(request,listener); // <1> + client.indices().putSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1848,7 +1851,7 @@ public void testPutTemplate() throws Exception { "}", // <2> XContentType.JSON); // end::put-template-request-mappings-json - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-map @@ -1862,7 +1865,7 @@ public void testPutTemplate() throws Exception { jsonMap.put("tweet", tweet); request.mapping("tweet", jsonMap); // <1> //end::put-template-request-mappings-map - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-xcontent @@ -1886,13 +1889,13 @@ public void testPutTemplate() throws Exception { builder.endObject(); request.mapping("tweet", builder); // <1> //end::put-template-request-mappings-xcontent - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-shortcut request.mapping("tweet", "message", "type=text"); // <1> //end::put-template-request-mappings-shortcut - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } // tag::put-template-request-aliases @@ -1946,7 +1949,7 @@ public void testPutTemplate() throws Exception { request.create(false); // make test happy // tag::put-template-execute - PutIndexTemplateResponse putTemplateResponse = client.indices().putTemplate(request); + PutIndexTemplateResponse putTemplateResponse = client.indices().putTemplate(request, RequestOptions.DEFAULT); // end::put-template-execute // tag::put-template-response @@ -1974,7 +1977,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::put-template-execute-async - client.indices().putTemplateAsync(request, listener); // <1> + client.indices().putTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-template-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java index 7971e49da44f4..f5bdc9f2f3ee5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java @@ -27,6 +27,7 @@ import 
org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; @@ -86,7 +87,7 @@ public void testPutPipeline() throws IOException { // end::put-pipeline-request-masterTimeout // tag::put-pipeline-execute - WritePipelineResponse response = client.ingest().putPipeline(request); // <1> + WritePipelineResponse response = client.ingest().putPipeline(request, RequestOptions.DEFAULT); // <1> // end::put-pipeline-execute // tag::put-pipeline-response @@ -129,7 +130,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::put-pipeline-execute-async - client.ingest().putPipelineAsync(request, listener); // <1> + client.ingest().putPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -154,7 +155,7 @@ public void testGetPipeline() throws IOException { // end::get-pipeline-request-masterTimeout // tag::get-pipeline-execute - GetPipelineResponse response = client.ingest().getPipeline(request); // <1> + GetPipelineResponse response = client.ingest().getPipeline(request, RequestOptions.DEFAULT); // <1> // end::get-pipeline-execute // tag::get-pipeline-response @@ -199,7 +200,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::get-pipeline-execute-async - client.ingest().getPipelineAsync(request, listener); // <1> + client.ingest().getPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -229,7 +230,7 @@ public void testDeletePipeline() throws IOException { // end::delete-pipeline-request-masterTimeout // tag::delete-pipeline-execute - WritePipelineResponse response = client.ingest().deletePipeline(request); // <1> + WritePipelineResponse response = client.ingest().deletePipeline(request, RequestOptions.DEFAULT); // <1> // end::delete-pipeline-execute // tag::delete-pipeline-response @@ -269,7 +270,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::delete-pipeline-execute-async - client.ingest().deletePipelineAsync(request, listener); // <1> + client.ingest().deletePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java index e1e08f120a2c9..6ac7e364dd812 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.apache.http.HttpEntity; @@ 
-34,9 +35,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; @@ -45,11 +43,6 @@ import java.io.InputStream; import java.util.Map; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; - /** * This class is used to generate the documentation for the * docs/java-rest/high-level/migration.asciidoc page. @@ -98,14 +91,14 @@ public void testRequests() throws Exception { //end::migration-request-ctor //tag::migration-request-ctor-execution - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); //end::migration-request-ctor-execution assertEquals(RestStatus.CREATED, response.status()); } { //tag::migration-request-async-execution DeleteRequest request = new DeleteRequest("index", "doc", "id"); // <1> - client.deleteAsync(request, new ActionListener() { // <2> + client.deleteAsync(request, RequestOptions.DEFAULT, new ActionListener() { // <2> @Override public void onResponse(DeleteResponse deleteResponse) { // <3> @@ -117,12 +110,12 @@ public void onFailure(Exception e) { } }); //end::migration-request-async-execution - assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "doc", "id")))); + assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "doc", "id"), RequestOptions.DEFAULT))); } { //tag::migration-request-sync-execution DeleteRequest request = new DeleteRequest("index", "doc", "id"); - DeleteResponse response = client.delete(request); // <1> + DeleteResponse response = client.delete(request, RequestOptions.DEFAULT); // <1> //end::migration-request-sync-execution assertEquals(RestStatus.NOT_FOUND, response.status()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java index 3e0608120f755..0c1c446961bdf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.apache.http.HttpHost; @@ -40,7 +41,7 @@ public void testMain() throws IOException { RestHighLevelClient client = highLevelClient(); { //tag::main-execute - MainResponse response = client.info(); + MainResponse response = client.info(RequestOptions.DEFAULT); //end::main-execute assertTrue(response.isAvailable()); //tag::main-response @@ -61,7 +62,7 @@ public void testMain() throws IOException { public void testPing() throws IOException { 
RestHighLevelClient client = highLevelClient(); //tag::ping-execute - boolean response = client.ping(); + boolean response = client.ping(RequestOptions.DEFAULT); //end::ping-execute assertTrue(response); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 9f9e1d903cb50..f4ce789d5106f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -42,6 +42,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; @@ -143,7 +144,7 @@ public void testSearch() throws Exception { // tag::search-request-preference searchRequest.preference("_local"); // <1> // end::search-request-preference - assertNotNull(client.search(searchRequest)); + assertNotNull(client.search(searchRequest, RequestOptions.DEFAULT)); } { // tag::search-source-basics @@ -176,7 +177,7 @@ public void testSearch() throws Exception { // end::search-source-setter // tag::search-execute - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // end::search-execute // tag::search-execute-listener @@ -198,7 +199,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::search-execute-async - client.searchAsync(searchRequest, listener); // <1> + client.searchAsync(searchRequest, RequestOptions.DEFAULT, listener); // <1> // end::search-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -296,7 +297,7 @@ public void testSearchRequestAggregations() throws IOException { request.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "company", "Elastic", "age", 40)); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -312,7 +313,7 @@ public void testSearchRequestAggregations() throws IOException { // end::search-request-aggregations searchSourceBuilder.query(QueryBuilders.matchAllQuery()); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); { // tag::search-request-aggregations-get Aggregations aggregations = searchResponse.getAggregations(); @@ -369,7 +370,7 @@ public void testSearchRequestSuggestions() throws IOException { request.add(new IndexRequest("posts", "doc", "3").source(XContentType.JSON, "user", "tlrx")); request.add(new IndexRequest("posts", "doc", "4").source(XContentType.JSON, "user", "cbuescher")); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); 
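Search, scroll, and bulk all go through the same overloads. A compact sketch of a synchronous search against a hypothetical "posts" index (query and field names are illustrative):

    import java.io.IOException;
    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    public class SearchExample {
        // Runs a match query and returns the total hit count.
        static long countTitleMatches(RestHighLevelClient client) throws IOException {
            SearchRequest request = new SearchRequest("posts");
            request.source(new SearchSourceBuilder()
                    .query(QueryBuilders.matchQuery("title", "Elasticsearch")));
            SearchResponse response = client.search(request, RequestOptions.DEFAULT);
            return response.getHits().getTotalHits();
        }
    }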
assertFalse(bulkResponse.hasFailures()); } @@ -384,7 +385,7 @@ public void testSearchRequestSuggestions() throws IOException { searchSourceBuilder.suggest(suggestBuilder); // end::search-request-suggestion searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); { // tag::search-request-suggestion-get Suggest suggest = searchResponse.getSuggest(); // <1> @@ -416,7 +417,7 @@ public void testSearchRequestHighlighting() throws IOException { .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -437,7 +438,7 @@ public void testSearchRequestHighlighting() throws IOException { .should(matchQuery("title", "Elasticsearch")) .should(matchQuery("user", "kimchy"))); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); { // tag::search-request-highlighting-get SearchHits hits = searchResponse.getHits(); @@ -472,7 +473,7 @@ public void testSearchRequestProfiling() throws IOException { IndexRequest request = new IndexRequest("posts", "doc", "1") .source(XContentType.JSON, "tags", "elasticsearch", "comments", 123); request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = client.index(request); + IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); assertSame(RestStatus.CREATED, indexResponse.status()); } { @@ -485,7 +486,7 @@ public void testSearchRequestProfiling() throws IOException { searchSourceBuilder.aggregation(AggregationBuilders.histogram("by_comments").field("comments").interval(100)); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // tag::search-request-profiling-get Map profilingResults = searchResponse.getProfileResults(); // <1> @@ -548,7 +549,7 @@ public void testScroll() throws Exception { request.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch")); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -561,7 +562,7 @@ public void testScroll() throws Exception { searchSourceBuilder.size(size); // <1> searchRequest.source(searchSourceBuilder); searchRequest.scroll(TimeValue.timeValueMinutes(1L)); // <2> - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); String scrollId = searchResponse.getScrollId(); // <3> SearchHits hits = searchResponse.getHits(); // <4> // end::search-scroll-init @@ -572,7 +573,7 @@ public void testScroll() throws Exception { // tag::search-scroll2 
SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <1> scrollRequest.scroll(TimeValue.timeValueSeconds(30)); - SearchResponse searchScrollResponse = client.searchScroll(scrollRequest); + SearchResponse searchScrollResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); scrollId = searchScrollResponse.getScrollId(); // <2> hits = searchScrollResponse.getHits(); // <3> assertEquals(3, hits.getTotalHits()); @@ -582,14 +583,14 @@ public void testScroll() throws Exception { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(scrollId); - ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest); + ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); assertTrue(clearScrollResponse.isSucceeded()); } { SearchRequest searchRequest = new SearchRequest(); searchRequest.scroll("60s"); - SearchResponse initialSearchResponse = client.search(searchRequest); + SearchResponse initialSearchResponse = client.search(searchRequest, RequestOptions.DEFAULT); String scrollId = initialSearchResponse.getScrollId(); SearchScrollRequest scrollRequest = new SearchScrollRequest(); @@ -601,7 +602,7 @@ public void testScroll() throws Exception { // end::scroll-request-arguments // tag::search-scroll-execute-sync - SearchResponse searchResponse = client.searchScroll(scrollRequest); + SearchResponse searchResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); // end::search-scroll-execute-sync assertEquals(0, searchResponse.getFailedShards()); @@ -648,7 +649,7 @@ public void onFailure(Exception e) { // end::clear-scroll-add-scroll-ids // tag::clear-scroll-execute - ClearScrollResponse response = client.clearScroll(request); + ClearScrollResponse response = client.clearScroll(request, RequestOptions.DEFAULT); // end::clear-scroll-execute // tag::clear-scroll-response @@ -678,7 +679,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, clearScrollLatch); // tag::clear-scroll-execute-async - client.clearScrollAsync(request, listener); // <1> + client.clearScrollAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::clear-scroll-execute-async assertTrue(clearScrollLatch.await(30L, TimeUnit.SECONDS)); @@ -692,14 +693,14 @@ public void onFailure(Exception e) { searchSourceBuilder.query(matchQuery("title", "Elasticsearch")); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); // <1> + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // <1> String scrollId = searchResponse.getScrollId(); SearchHit[] searchHits = searchResponse.getHits().getHits(); while (searchHits != null && searchHits.length > 0) { // <2> SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <3> scrollRequest.scroll(scroll); - searchResponse = client.searchScroll(scrollRequest); + searchResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); scrollId = searchResponse.getScrollId(); searchHits = searchResponse.getHits().getHits(); // <4> @@ -707,7 +708,7 @@ public void onFailure(Exception e) { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); // <5> clearScrollRequest.addScrollId(scrollId); - ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest); + ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); boolean succeeded = 
clearScrollResponse.isSucceeded(); // end::search-scroll-example assertTrue(succeeded); @@ -737,7 +738,7 @@ public void testSearchTemplateWithInlineScript() throws Exception { // end::search-template-request-inline // tag::search-template-response - SearchTemplateResponse response = client.searchTemplate(request); + SearchTemplateResponse response = client.searchTemplate(request, RequestOptions.DEFAULT); SearchResponse searchResponse = response.getResponse(); // end::search-template-response @@ -749,7 +750,7 @@ public void testSearchTemplateWithInlineScript() throws Exception { // end::render-search-template-request // tag::render-search-template-response - SearchTemplateResponse renderResponse = client.searchTemplate(request); + SearchTemplateResponse renderResponse = client.searchTemplate(request, RequestOptions.DEFAULT); BytesReference source = renderResponse.getSource(); // <1> // end::render-search-template-response @@ -802,7 +803,7 @@ public void testSearchTemplateWithStoredScript() throws Exception { // end::search-template-request-options // tag::search-template-execute - SearchTemplateResponse response = client.searchTemplate(request); + SearchTemplateResponse response = client.searchTemplate(request, RequestOptions.DEFAULT); // end::search-template-execute SearchResponse searchResponse = response.getResponse(); @@ -828,7 +829,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::search-template-execute-async - client.searchTemplateAsync(request, listener); // <1> + client.searchTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::search-template-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -849,7 +850,7 @@ public void testFieldCaps() throws Exception { // end::field-caps-request-indicesOptions // tag::field-caps-execute - FieldCapabilitiesResponse response = client.fieldCaps(request); + FieldCapabilitiesResponse response = client.fieldCaps(request, RequestOptions.DEFAULT); // end::field-caps-execute // tag::field-caps-response @@ -892,7 +893,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::field-caps-execute-async - client.fieldCapsAsync(request, listener); // <1> + client.fieldCapsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::field-caps-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -918,7 +919,7 @@ public void testRankEval() throws Exception { // end::rank-eval-request-basic // tag::rank-eval-execute - RankEvalResponse response = client.rankEval(request); + RankEvalResponse response = client.rankEval(request, RequestOptions.DEFAULT); // end::rank-eval-execute // tag::rank-eval-response @@ -962,7 +963,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::rank-eval-execute-async - client.rankEvalAsync(request, listener); // <1> + client.rankEvalAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::rank-eval-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -987,7 +988,7 @@ public void testMultiSearch() throws Exception { request.add(secondSearchRequest); // end::multi-search-request-basic // tag::multi-search-execute - MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse response = client.multiSearch(request, RequestOptions.DEFAULT); // end::multi-search-execute // tag::multi-search-response MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> @@ -1019,7 +1020,7 @@ 
public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::multi-search-execute-async - client.multiSearchAsync(request, listener); // <1> + client.multiSearchAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::multi-search-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1030,7 +1031,7 @@ public void onFailure(Exception e) { request.add(new SearchRequest("posts") // <1> .types("doc")); // <2> // end::multi-search-request-index - MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse response = client.multiSearch(request, RequestOptions.DEFAULT); MultiSearchResponse.Item firstResponse = response.getResponses()[0]; assertNull(firstResponse.getFailure()); SearchResponse searchResponse = firstResponse.getResponse(); @@ -1041,12 +1042,12 @@ public void onFailure(Exception e) { private void indexSearchTestData() throws IOException { CreateIndexRequest authorsRequest = new CreateIndexRequest("authors") .mapping("doc", "user", "type=keyword,doc_values=false"); - CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest); + CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors") .mapping("doc", "user", "type=keyword"); - CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest); + CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest, RequestOptions.DEFAULT); assertTrue(reviewersResponse.isAcknowledged()); BulkRequest bulkRequest = new BulkRequest(); @@ -1067,7 +1068,7 @@ private void indexSearchTestData() throws IOException { bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest); + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 2890ad50c2666..8c158a91a5111 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; @@ -134,7 +135,7 @@ public void testSnapshotCreateRepository() throws IOException { // end::create-repository-request-verify // tag::create-repository-execute - PutRepositoryResponse response = client.snapshot().createRepository(request); + PutRepositoryResponse response = client.snapshot().createRepository(request, RequestOptions.DEFAULT); // end::create-repository-execute // tag::create-repository-response @@ 
-168,7 +169,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::create-repository-execute-async - client.snapshot().createRepositoryAsync(request, listener); // <1> + client.snapshot().createRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::create-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -197,7 +198,7 @@ public void testSnapshotGetRepository() throws IOException { // end::get-repository-request-masterTimeout // tag::get-repository-execute - GetRepositoriesResponse response = client.snapshot().getRepositories(request); + GetRepositoriesResponse response = client.snapshot().getRepositories(request, RequestOptions.DEFAULT); // end::get-repository-execute // tag::get-repository-response @@ -232,7 +233,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::get-repository-execute-async - client.snapshot().getRepositoriesAsync(request, listener); // <1> + client.snapshot().getRepositoriesAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -258,7 +259,7 @@ public void testSnapshotDeleteRepository() throws IOException { // end::delete-repository-request-timeout // tag::delete-repository-execute - DeleteRepositoryResponse response = client.snapshot().deleteRepository(request); + DeleteRepositoryResponse response = client.snapshot().deleteRepository(request, RequestOptions.DEFAULT); // end::delete-repository-execute // tag::delete-repository-response @@ -292,7 +293,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::delete-repository-execute-async - client.snapshot().deleteRepositoryAsync(request, listener); // <1> + client.snapshot().deleteRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -317,7 +318,7 @@ public void testSnapshotVerifyRepository() throws IOException { // end::verify-repository-request-timeout // tag::verify-repository-execute - VerifyRepositoryResponse response = client.snapshot().verifyRepository(request); + VerifyRepositoryResponse response = client.snapshot().verifyRepository(request, RequestOptions.DEFAULT); // end::verify-repository-execute // tag::verify-repository-response @@ -352,7 +353,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::verify-repository-execute-async - client.snapshot().verifyRepositoryAsync(request, listener); // <1> + client.snapshot().verifyRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::verify-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -363,6 +364,6 @@ private void createTestRepositories() throws IOException { PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); request.settings("{\"location\": \".\"}", XContentType.JSON); - assertTrue(highLevelClient().snapshot().createRepository(request).isAcknowledged()); + assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index 
faf447a4143b1..0d62a2d29a03b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.TaskId; @@ -90,7 +91,7 @@ public void testListTasks() throws IOException { ListTasksRequest request = new ListTasksRequest(); // tag::list-tasks-execute - ListTasksResponse response = client.tasks().list(request); + ListTasksResponse response = client.tasks().list(request, RequestOptions.DEFAULT); // end::list-tasks-execute assertThat(response, notNullValue()); @@ -139,7 +140,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::list-tasks-execute-async - client.tasks().listAsync(request, listener); // <1> + client.tasks().listAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::list-tasks-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 409a646c4fe36..331e851105819 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -133,19 +134,20 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); for (int i = 0; i < 10; i++) { - restHighLevelClient.index(new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value")); + restHighLevelClient.index( + new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value"), RequestOptions.DEFAULT); } Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index"), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, response.getClusters()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); 
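The assertions in this test hinge on two conventions: a remote cluster is addressed by prefixing an index with its configured alias (here `remote1:index`), and the `skip_unavailable` flag decides whether an unreachable remote fails the whole search or is merely skipped and counted. A hedged sketch of that setup using the high-level client; the `search.remote.*` setting names follow the 6.x convention and, like the seed address, are assumptions rather than taken from this patch:

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class RemoteClusterSketch {
    static SearchResponse searchBothClusters(RestHighLevelClient client) throws IOException {
        // register a seed node for the cluster alias "remote1" and mark it skippable
        ClusterUpdateSettingsRequest settings = new ClusterUpdateSettingsRequest();
        settings.persistentSettings(Settings.builder()
                .put("search.remote.remote1.seeds", "127.0.0.1:9300") // assumed seed address
                .put("search.remote.remote1.skip_unavailable", true)
                .build());
        client.cluster().putSettings(settings, RequestOptions.DEFAULT);

        // "index" is resolved on the local cluster, "remote1:index" on the remote one
        return client.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT);
    }
}
--------------------------------------------------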
assertEquals(2, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); @@ -153,7 +155,7 @@ public void testSearchSkipUnavailable() throws IOException { assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); assertEquals(1, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); @@ -161,14 +163,15 @@ public void testSearchSkipUnavailable() throws IOException { } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m"), + RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(2, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId)); + SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); assertEquals(10, scrollResponse.getHits().totalHits); assertEquals(0, scrollResponse.getHits().getHits().length); @@ -179,7 +182,7 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true)); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(1, response.getClusters().getSkipped()); @@ -187,7 +190,7 @@ public void testSearchSkipUnavailable() throws IOException { assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); assertEquals(1, response.getClusters().getTotal()); assertEquals(0, response.getClusters().getSuccessful()); assertEquals(1, response.getClusters().getSkipped()); @@ -195,14 +198,15 @@ public void testSearchSkipUnavailable() throws IOException { } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m"), + RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(1, response.getClusters().getSkipped()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId)); + SearchResponse scrollResponse = 
restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); assertEquals(10, scrollResponse.getHits().totalHits); assertEquals(0, scrollResponse.getHits().getHits().length); @@ -266,19 +270,19 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { private static void assertSearchConnectFailure() { { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"))); + () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = (ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index"))); + () -> restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = (ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"))); + () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = (ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } From 4b39993e748ab20b38d104b53847ecc5a166a1ca Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 8 Jun 2018 13:36:19 +0100 Subject: [PATCH 06/24] [DOCS] Add note about long-lived idle connections (#30990) Clarify that we expect to have idle inter-node connections within the cluster, and that the network needs to be configured not to disrupt these. --- docs/reference/modules/transport.asciidoc | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index b7a65d98592cc..046d82cc507eb 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -44,7 +44,12 @@ time setting format). Defaults to `30s`. |`transport.tcp.compress` |Set to `true` to enable compression (`DEFLATE`) between all nodes. Defaults to `false`. -|`transport.ping_schedule` | Schedule a regular ping message to ensure that connections are kept alive. Defaults to `5s` in the transport client and `-1` (disabled) elsewhere. +|`transport.ping_schedule` | Schedule a regular application-level ping message +to ensure that transport connections between nodes are kept alive. Defaults to +`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable to +correctly configure TCP keep-alives instead of using this feature, because TCP +keep-alives apply to all kinds of long-lived connection and not just to +transport connections. 
|======================================================================= @@ -80,6 +85,20 @@ The following parameters can be configured like that * `tcp_send_buffer_size`: Configures the send buffer size of the socket * `tcp_receive_buffer_size`: Configures the receive buffer size of the socket +[float] +==== Long-lived idle connections + +Elasticsearch opens a number of long-lived TCP connections between each pair of +nodes in the cluster, and some of these connections may be idle for an extended +period of time. Nonetheless, Elasticsearch requires these connections to remain +open, and it can disrupt the operation of the cluster if any inter-node +connections are closed by an external influence such as a firewall. It is +important to configure your network to preserve long-lived idle connections +between Elasticsearch nodes, for instance by leaving `tcp_keep_alive` enabled +and ensuring that the keepalive interval is shorter than any timeout that might +cause idle connections to be closed, or by setting `transport.ping_schedule` if +keepalives cannot be configured. + [float] === Transport Tracer From 45fccaa1c23ed2d212d622c8d36639b1940fba58 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 8 Jun 2018 08:55:10 -0400 Subject: [PATCH 07/24] Enhance license detection for various licenses (#31198) This commit enhances the license detection that we have for various licenses. Here we improve the detection for all licenses (especially the Apache 2.0 License), the BSD 2-clause license, the MIT (with attribution) license, and we add detection for the BSD 3-clause license. One way we achieved this improvement was to change how the license files are read: rather than reading them as a multi-line string, which ended up represented internally as "[line1, line2, line3, ...]", we now read the full bytes of the license text and replace every run of whitespace with a single space, so the license text is loaded as "line1 line2 line3". For the MIT license we add the actual license text and remove the "MIT" string, as not all copies of the license clearly indicate that the text is the MIT license. We take a similar strategy for the BSD 2-clause and BSD 3-clause licenses. With this change, we reduce the number of "custom" licenses in the codebase from 31 to 2. The two remaining appear to be truly custom licenses, not carrying licenses identifiable by SPDX. A follow-up will address "unknown" licenses. --- .../gradle/DependenciesInfoTask.groovy | 83 ++++++++++++++++++- 1 file changed, 79 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy index eb82b4675f287..b42e6cc8e3caa 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy @@ -109,7 +109,8 @@ public class DependenciesInfoTask extends DefaultTask { } if (license) { - final String content = license.readLines("UTF-8").toString() + // replace * because they are sometimes used at the beginning of lines as if the license were a multi-line comment + final String content = new String(license.readBytes(), "UTF-8").replaceAll("\\s+", " ").replaceAll("\\*", " ") final String spdx = checkSPDXLicense(content) if (spdx == null) { // License has not been identified as SPDX.
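The one-line normalization in the hunk above is the crux of the whole change: once every run of whitespace collapses to a single space, a complete license body can be matched by a single regular expression, no matter how the file was wrapped. A standalone Java sketch of the same idea; the pattern is deliberately abbreviated (the real task matches full license texts, as the hunks below show) and the class name is illustrative:

["source","java"]
--------------------------------------------------
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.regex.Pattern;

public class LicenseSniffSketch {
    // abbreviated: the real task embeds the complete Apache-2.0, BSD, and MIT texts
    private static final Pattern APACHE_2_0 =
            Pattern.compile(".*Apache.*License.*(v|V)ersion.*2\\.0.*");

    static String detect(Path licenseFile) throws IOException {
        String content = new String(Files.readAllBytes(licenseFile), StandardCharsets.UTF_8)
                .replaceAll("\\s+", " ")  // collapse newlines and indentation to single spaces
                .replaceAll("\\*", " ");  // strip asterisks used as comment decoration
        return APACHE_2_0.matcher(content).matches() ? "Apache-2.0" : "custom";
    }
}
--------------------------------------------------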
@@ -133,13 +134,84 @@ public class DependenciesInfoTask extends DefaultTask { private String checkSPDXLicense(final String licenseText) { String spdx = null - final String APACHE_2_0 = "Apache.*License.*(v|V)ersion 2.0" - final String BSD_2 = "BSD 2-clause.*License" + final String APACHE_2_0 = "Apache.*License.*(v|V)ersion.*2\\.0" + + final String BSD_2 = """ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1\\. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer\\. + 2\\. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution\\. + +THIS SOFTWARE IS PROVIDED BY .+ (``|''|")AS IS(''|") AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\. +IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +\\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\. +""".replaceAll("\\s+", "\\\\s*") + + final String BSD_3 = """ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + (1\\.)? Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer\\. + (2\\.)? Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution\\. + ((3\\.)? The name of .+ may not be used to endorse or promote products + derived from this software without specific prior written permission\\.| + (3\\.)? Neither the name of .+ nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission\\.) + +THIS SOFTWARE IS PROVIDED BY .+ (``|''|")AS IS(''|") AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\. +IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +\\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\. 
+""".replaceAll("\\s+", "\\\\s*") + final String CDDL_1_0 = "COMMON DEVELOPMENT AND DISTRIBUTION LICENSE.*Version 1.0" final String CDDL_1_1 = "COMMON DEVELOPMENT AND DISTRIBUTION LICENSE.*Version 1.1" final String ICU = "ICU License - ICU 1.8.1 and later" final String LGPL_3 = "GNU LESSER GENERAL PUBLIC LICENSE.*Version 3" - final String MIT = "MIT License" + + final String MIT = """ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files \\(the "Software"\\), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software\\. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\\. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE\\. +""".replaceAll("\\s+", "\\\\s*") + final String MOZILLA_1_1 = "Mozilla Public License.*Version 1.1" switch (licenseText) { @@ -152,6 +224,9 @@ public class DependenciesInfoTask extends DefaultTask { case ~/.*${BSD_2}.*/: spdx = 'BSD-2-Clause' break + case ~/.*${BSD_3}.*/: + spdx = 'BSD-3-Clause' + break case ~/.*${LGPL_3}.*/: spdx = 'LGPL-3.0' break From 014e5b869224ef261193b11267b6f37c5289a832 Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Fri, 8 Jun 2018 17:28:43 +0300 Subject: [PATCH 08/24] Mute WatchBackwardsCompatibilityIT.testWatcherRestart Relates #31196 --- .../elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java index 3711a88f973f6..64c612e65d7ab 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java @@ -177,6 +177,7 @@ public void testWatcherStats() throws Exception { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31196") public void testWatcherRestart() throws Exception { executeUpgradeIfNeeded(); From f4ea2e95978b9d16e38bfc18b2d73e282b720970 Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Fri, 8 Jun 2018 17:29:54 +0300 Subject: [PATCH 09/24] Mute TokenBackwardsCompatibilityIT.testMixedCluster Relates #31195 --- .../elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 7e2b1a99e915d..c2b9f3db800c9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ 
b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -69,6 +69,7 @@ public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assertTokenWorks((String) source.get("token")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31195") public void testMixedCluster() throws Exception { assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); From 540b117b9e7d2f10f0eba740bc3618c757664e89 Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Thu, 7 Jun 2018 14:02:23 -0700 Subject: [PATCH 10/24] high level REST api: cancel task (#30745) * Initial commit of rest high level exposure of cancel task * fix javadocs * address some code review comments * update branch to use tasks namespace instead of cluster * High-level client: list tasks failure to not lose nodeId This commit reworks testing for `ListTasksResponse` so that random fields insertion can be tested and xcontent equivalence can be checked too. Proper exclusions need to be configured, and failures need to be tested separately. This helped find a little problem: whenever a node failure was returned, the nodeId was lost, as it was never printed out as part of the exception's toXContent. * added comment * merge from master * re-work CancelTasksResponseTests to separate XContent failure cases from non-failure cases * remove duplication of logic in parser creation * code review changes * refactor TasksClient to support RequestOptions * add tests for parent task id * address final PR review comments, mostly formatting and such --- .../client/RequestConverters.java | 19 +++ .../org/elasticsearch/client/TasksClient.java | 43 +++++++ .../client/RequestConvertersTests.java | 19 +++ .../org/elasticsearch/client/TasksIT.java | 25 ++++ .../ClusterClientDocumentationIT.java | 1 + .../TasksClientDocumentationIT.java | 72 +++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../high-level/tasks/cancel_tasks.asciidoc | 82 +++++++++++++ .../tasks/cancel/CancelTasksResponse.java | 29 ++++- .../node/tasks/list/ListTasksResponse.java | 27 ++-- .../tasks/CancelTasksResponseTests.java | 116 ++++++++++++++++++ .../tasks/ListTasksResponseTests.java | 26 ++-- 12 files changed, 442 insertions(+), 19 deletions(-) create mode 100644 docs/java-rest/high-level/tasks/cancel_tasks.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 308b8917842d1..53992a051080b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -29,6 +29,7 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -108,6 +109,17 @@ private RequestConverters() { // Contains only
status utility methods } + static Request cancelTasks(CancelTasksRequest cancelTasksRequest) { + Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); + Params params = new Params(request); + params.withTimeout(cancelTasksRequest.getTimeout()) + .withTaskId(cancelTasksRequest.getTaskId()) + .withNodes(cancelTasksRequest.getNodes()) + .withParentTaskId(cancelTasksRequest.getParentTaskId()) + .withActions(cancelTasksRequest.getActions()); + return request; + } + static Request delete(DeleteRequest deleteRequest) { String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); @@ -1092,6 +1104,13 @@ Params withActions(String[] actions) { return this; } + Params withTaskId(TaskId taskId) { + if (taskId != null && taskId.isSet()) { + return putParam("task_id", taskId.toString()); + } + return this; + } + Params withParentTaskId(TaskId parentTaskId) { if (parentTaskId != null && parentTaskId.isSet()) { return putParam("parent_task_id", parentTaskId.toString()); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index f4a76e78b946b..f8f03d7f7d288 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -65,4 +67,45 @@ public void listAsync(ListTasksRequest request, RequestOptions options, ActionLi restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, options, ListTasksResponse::fromXContent, listener, emptySet()); } + + /** + * Cancel one or more cluster tasks using the Task Management API. + * + * See + * Task Management API on elastic.co + * @param cancelTasksRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + * + */ + public CancelTasksResponse cancel(CancelTasksRequest cancelTasksRequest, RequestOptions options ) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + cancelTasksRequest, + RequestConverters::cancelTasks, + options, + parser -> CancelTasksResponse.fromXContent(parser), + emptySet() + ); + } + + /** + * Asynchronously cancel one or more cluster tasks using the Task Management API. + * + * See + * Task Management API on elastic.co + * @param cancelTasksRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + cancelTasksRequest, + RequestConverters::cancelTasks, + options, + parser -> CancelTasksResponse.fromXContent(parser), + listener, + emptySet() + ); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 2645846341f4b..c09da06995599 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,6 +29,8 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -1620,6 +1622,23 @@ public void testIndexPutSettings() throws IOException { assertEquals(expectedParams, request.getParameters()); } + public void testCancelTasks() { + CancelTasksRequest request = new CancelTasksRequest(); + Map expectedParams = new HashMap<>(); + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setTaskId(taskId); + request.setParentTaskId(parentTaskId); + expectedParams.put("task_id", taskId.toString()); + expectedParams.put("parent_task_id", parentTaskId.toString()); + Request httpRequest = RequestConverters.cancelTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + public void testListTasks() { { ListTasksRequest request = new ListTasksRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java index fc7d70a36e10e..baa97cfa5b4ef 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java @@ -19,9 +19,12 @@ package org.elasticsearch.client; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; @@ -58,4 +61,26 @@ public 
void testListTasks() throws IOException { assertTrue("List tasks were not found", listTasksFound); } + public void testCancelTasks() throws IOException { + ListTasksRequest listRequest = new ListTasksRequest(); + ListTasksResponse listResponse = execute( + listRequest, + highLevelClient().tasks()::list, + highLevelClient().tasks()::listAsync + ); + // in this case, probably no task will actually be cancelled. + // this is ok, that case is covered in TasksIT.testTasksCancellation + TaskInfo firstTask = listResponse.getTasks().get(0); + String node = listResponse.getPerNodeTasks().keySet().iterator().next(); + + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); + cancelTasksRequest.setTaskId(new TaskId(node, firstTask.getId())); + cancelTasksRequest.setReason("testreason"); + CancelTasksResponse response = execute(cancelTasksRequest, + highLevelClient().tasks()::cancel, + highLevelClient().tasks()::cancelAsync); + // Since the task may or may not have been cancelled, assert that we received a response only + // The actual testing of task cancellation is covered by TasksIT.testTasksCancellation + assertThat(response, notNullValue()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index e8dd4025ba94e..75902cf02babb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -178,4 +178,5 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index 0d62a2d29a03b..8a45195757c13 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -146,4 +148,74 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testCancelTasks() throws IOException { + RestHighLevelClient client = highLevelClient(); + { + // tag::cancel-tasks-request + CancelTasksRequest request = new CancelTasksRequest(); + // end::cancel-tasks-request + + // tag::cancel-tasks-request-filter + request.setTaskId(new TaskId("nodeId1", 42)); //<1> + request.setActions("cluster:*"); // <2> + request.setNodes("nodeId1", "nodeId2"); // <3> + // end::cancel-tasks-request-filter + + } + + CancelTasksRequest request = new CancelTasksRequest(); + request.setTaskId(TaskId.EMPTY_TASK_ID); + + // 
tag::cancel-tasks-execute + CancelTasksResponse response = client.tasks().cancel(request, RequestOptions.DEFAULT); + // end::cancel-tasks-execute + + assertThat(response, notNullValue()); + + // tag::cancel-tasks-response-tasks + List<TaskInfo> tasks = response.getTasks(); // <1> + // end::cancel-tasks-response-tasks + + + // tag::cancel-tasks-response-failures + List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1> + List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2> + // end::cancel-tasks-response-failures + + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + } + + public void testAsyncCancelTasks() throws InterruptedException { + + RestHighLevelClient client = highLevelClient(); + { + CancelTasksRequest request = new CancelTasksRequest(); + + // tag::cancel-tasks-execute-listener + ActionListener<CancelTasksResponse> listener = + new ActionListener<CancelTasksResponse>() { + @Override + public void onResponse(CancelTasksResponse response) { + // <1> + } + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::cancel-tasks-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::cancel-tasks-execute-async + client.tasks().cancelAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::cancel-tasks-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 8694a624654a5..064cd401721ac 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -140,5 +140,7 @@ include::snapshot/verify_repository.asciidoc[] The Java High Level REST Client supports the following Tasks APIs: * <> +* <> include::tasks/list_tasks.asciidoc[] +include::tasks/cancel_tasks.asciidoc[] diff --git a/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc new file mode 100644 index 0000000000000..089f87c00a2ef --- /dev/null +++ b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc @@ -0,0 +1,82 @@ +[[java-rest-high-cluster-cancel-tasks]] +=== Cancel Tasks API + +The Cancel Tasks API allows cancellation of a currently running task. + +==== Cancel Tasks Request + +A `CancelTasksRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-request] +-------------------------------------------------- +There are no required parameters. The task cancellation command supports the same +task selection parameters as the list tasks command.
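Read together with the tagged snippets, the typical end-to-end flow enabled by this patch is: list the running tasks, pick a `TaskId`, and cancel it. A hedged sketch combining the new `tasks().list()` and `tasks().cancel()` methods; cancellation is best-effort, and the choice of the first task and the reason string are illustrative:

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.tasks.TaskInfo;

public class CancelTaskSketch {
    static CancelTasksResponse cancelFirstListedTask(RestHighLevelClient client) throws IOException {
        // list the currently running tasks and take the first one
        ListTasksResponse listed = client.tasks().list(new ListTasksRequest(), RequestOptions.DEFAULT);
        TaskInfo first = listed.getTasks().get(0);

        // cancel it by TaskId; the task may have finished already, so inspect the response
        CancelTasksRequest cancel = new CancelTasksRequest();
        cancel.setTaskId(first.getTaskId());
        cancel.setReason("cancelled from a sketch"); // illustrative reason
        return client.tasks().cancel(cancel, RequestOptions.DEFAULT);
    }
}
--------------------------------------------------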
+ + ==== Parameters + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-request-filter] + -------------------------------------------------- + <1> Cancel the task with the specified `TaskId` + <2> Cancel only cluster-related tasks + <3> Cancel all tasks running on nodes `nodeId1` and `nodeId2` + + ==== Synchronous Execution + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute] + -------------------------------------------------- + + ==== Asynchronous Execution + + The asynchronous execution requires a `CancelTasksRequest` instance and an + `ActionListener` instance to be passed to the asynchronous method: + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute-async] + -------------------------------------------------- + <1> The `CancelTasksRequest` to execute and the `ActionListener` to use + when the execution completes + + The asynchronous method does not block and returns immediately. Once it is + completed the `ActionListener` is called back using the `onResponse` method + if the execution successfully completed or using the `onFailure` method if + it failed. + + A typical listener for `CancelTasksResponse` looks like: + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute-listener] + -------------------------------------------------- + <1> Called when the execution is successfully completed. The response is + provided as an argument + <2> Called in case of a failure.
The raised exception is provided as an argument + + ==== Cancel Tasks Response + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-tasks] + -------------------------------------------------- + <1> List of cancelled tasks + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-calc] + -------------------------------------------------- + <1> List of cancelled tasks grouped by a node + <2> List of cancelled tasks grouped by a parent task + + ["source","java",subs="attributes,callouts,macros"] + -------------------------------------------------- + include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-failures] + -------------------------------------------------- + <1> List of node failures + <2> List of task cancellation failures + diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 5e7c2c0f97d56..fbc81d2995511 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -19,23 +19,48 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; -import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.tasks.TaskInfo; +import java.io.IOException; import java.util.List; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * Returns the list of tasks that were cancelled */ public class CancelTasksResponse extends ListTasksResponse { + private static final ConstructingObjectParser<CancelTasksResponse, Void> PARSER = + setupParser("cancel_tasks_response", CancelTasksResponse::new); + public CancelTasksResponse() { } - public CancelTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<FailedNodeException> + public CancelTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<? extends ElasticsearchException> nodeFailures) { super(tasks, taskFailures, nodeFailures); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return super.toXContent(builder, params); + } + + public static CancelTasksResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 53d80853328b2..cb1fcb0b091ee 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -70,8 +71,14 @@ public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFa this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks)); } - private static final ConstructingObjectParser<ListTasksResponse, Void> PARSER = - new ConstructingObjectParser<>("list_tasks_response", true, + + protected static <T> ConstructingObjectParser<T, Void> setupParser(String name, + TriFunction< + List<TaskInfo>, + List<TaskOperationFailure>, + List<ElasticsearchException>, + T> ctor) { + ConstructingObjectParser<T, Void> parser = new ConstructingObjectParser<>(name, true, constructingObjects -> { int i = 0; @SuppressWarnings("unchecked") @@ -80,16 +87,18 @@ public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFa List<TaskOperationFailure> tasksFailures = (List<TaskOperationFailure>) constructingObjects[i++]; @SuppressWarnings("unchecked") List<ElasticsearchException> nodeFailures = (List<ElasticsearchException>) constructingObjects[i]; - return new ListTasksResponse(tasks, tasksFailures, nodeFailures); + return ctor.apply(tasks, tasksFailures, nodeFailures); }); - - static { - PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); - PARSER.declareObjectArray(optionalConstructorArg(), - (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES)); + parser.declareObjectArray(optionalConstructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); + parser.declareObjectArray(optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(NODE_FAILURES)); + return parser; } + private static final ConstructingObjectParser<ListTasksResponse, Void> PARSER = + setupParser("list_tasks_response", ListTasksResponse::new); + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java new file mode 100644 index 0000000000000..3233edefb30d4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.tasks; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.net.ConnectException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class CancelTasksResponseTests extends AbstractXContentTestCase { + + @Override + protected CancelTasksResponse createTestInstance() { + List randomTasks = randomTasks(); + return new CancelTasksResponse(randomTasks, Collections.emptyList(), Collections.emptyList()); + } + + private static List randomTasks() { + List randomTasks = new ArrayList<>(); + for (int i = 0; i < randomInt(10); i++) { + randomTasks.add(TaskInfoTests.randomTaskInfo()); + } + return randomTasks; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + //status and headers hold arbitrary content, we can't inject random fields in them + return field -> field.endsWith("status") || field.endsWith("headers"); + } + + @Override + protected void assertEqualInstances(CancelTasksResponse expectedInstance, CancelTasksResponse newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); + ListTasksResponseTests.assertOnNodeFailures(newInstance.getNodeFailures(), expectedInstance.getNodeFailures()); + ListTasksResponseTests.assertOnTaskFailures(newInstance.getTaskFailures(), expectedInstance.getTaskFailures()); + } + + @Override + protected CancelTasksResponse doParseInstance(XContentParser parser) { + return CancelTasksResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected boolean assertToXContentEquivalence() { + return true; + } + + /** + * Test parsing {@link ListTasksResponse} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = CancelTasksResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence); + } + + private static CancelTasksResponse createTestInstanceWithFailures() { + int numNodeFailures = randomIntBetween(0, 3); + List nodeFailures = new ArrayList<>(numNodeFailures); + for (int i = 0; i < numNodeFailures; i++) { + nodeFailures.add(new FailedNodeException(randomAlphaOfLength(5), "error message", new ConnectException())); + } + int numTaskFailures = randomIntBetween(0, 3); + List taskFailures = new ArrayList<>(numTaskFailures); + for (int i = 0; i < numTaskFailures; i++) { + taskFailures.add(new TaskOperationFailure(randomAlphaOfLength(5), randomLong(), new IllegalStateException())); + } + return new CancelTasksResponse(randomTasks(), taskFailures, nodeFailures); + } + +} diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index b280446db1c74..4862278fac111 100644 --- a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ -109,20 +109,30 @@ protected Predicate getRandomFieldsExcludeFilter() { protected void assertEqualInstances(ListTasksResponse expectedInstance, ListTasksResponse newInstance) { assertNotSame(expectedInstance, newInstance); assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); - assertThat(newInstance.getNodeFailures().size(), equalTo(expectedInstance.getNodeFailures().size())); - for (int i = 0; i < newInstance.getNodeFailures().size(); i++) { - ElasticsearchException newException = newInstance.getNodeFailures().get(i); - ElasticsearchException expectedException = expectedInstance.getNodeFailures().get(i); + assertOnNodeFailures(newInstance.getNodeFailures(), expectedInstance.getNodeFailures()); + assertOnTaskFailures(newInstance.getTaskFailures(), expectedInstance.getTaskFailures()); + } + + protected static void assertOnNodeFailures(List nodeFailures, + List expectedFailures) { + assertThat(nodeFailures.size(), equalTo(expectedFailures.size())); + for (int i = 0; i < nodeFailures.size(); i++) { + ElasticsearchException newException = nodeFailures.get(i); + ElasticsearchException expectedException = expectedFailures.get(i); assertThat(newException.getMetadata("es.node_id").get(0), equalTo(((FailedNodeException)expectedException).nodeId())); assertThat(newException.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=error message]")); assertThat(newException.getCause(), instanceOf(ElasticsearchException.class)); ElasticsearchException cause = (ElasticsearchException) newException.getCause(); assertThat(cause.getMessage(), equalTo("Elasticsearch exception [type=connect_exception, reason=null]")); } - assertThat(newInstance.getTaskFailures().size(), equalTo(expectedInstance.getTaskFailures().size())); - for (int i = 0; i < newInstance.getTaskFailures().size(); i++) { - TaskOperationFailure newFailure = newInstance.getTaskFailures().get(i); - TaskOperationFailure expectedFailure = expectedInstance.getTaskFailures().get(i); + } + + protected static void assertOnTaskFailures(List taskFailures, + List 
expectedFailures) { + assertThat(taskFailures.size(), equalTo(expectedFailures.size())); + for (int i = 0; i < taskFailures.size(); i++) { + TaskOperationFailure newFailure = taskFailures.get(i); + TaskOperationFailure expectedFailure = expectedFailures.get(i); assertThat(newFailure.getNodeId(), equalTo(expectedFailure.getNodeId())); assertThat(newFailure.getTaskId(), equalTo(expectedFailure.getTaskId())); assertThat(newFailure.getStatus(), equalTo(expectedFailure.getStatus())); From de5324ed112bb28dc471c3dd97f8a37551883b62 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 8 Jun 2018 09:16:38 -0700 Subject: [PATCH 11/24] Allow to trim all ops above a certain seq# with a term lower than X Relates to #10708 (cherry picked from commit a86c0f8) --- .../resync/ResyncReplicationRequest.java | 24 +- .../TransportResyncReplicationAction.java | 3 + .../elasticsearch/index/engine/Engine.java | 8 +- .../index/engine/InternalEngine.java | 20 +- .../index/seqno/SequenceNumbers.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../index/shard/PrimaryReplicaSyncer.java | 21 +- .../index/translog/Checkpoint.java | 92 ++++++-- .../index/translog/MultiSnapshot.java | 9 + .../index/translog/Translog.java | 42 ++++ .../index/translog/TranslogReader.java | 38 +++ .../index/translog/TranslogSnapshot.java | 17 +- .../index/translog/TranslogWriter.java | 24 +- .../recovery/RecoverySourceHandler.java | 4 +- .../resync/ResyncReplicationRequestTests.java | 2 +- .../IndexLevelReplicationTests.java | 10 +- .../RecoveryDuringReplicationTests.java | 71 +++++- .../shard/PrimaryReplicaSyncerTests.java | 36 ++- .../index/translog/TranslogHeaderTests.java | 3 +- .../index/translog/TranslogTests.java | 216 +++++++++++++++++- 20 files changed, 579 insertions(+), 69 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index 9aadf56178648..0995411279246 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -33,17 +34,24 @@ */ public final class ResyncReplicationRequest extends ReplicatedWriteRequest { + private long trimAboveSeqNo; private Translog.Operation[] operations; ResyncReplicationRequest() { super(); } - public ResyncReplicationRequest(final ShardId shardId, final Translog.Operation[] operations) { + public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo, + final Translog.Operation[] operations) { super(shardId); + this.trimAboveSeqNo = trimAboveSeqNo; this.operations = operations; } + public long getTrimAboveSeqNo() { + return trimAboveSeqNo; + } + public Translog.Operation[] getOperations() { return operations; } @@ -58,12 +66,20 @@ public void readFrom(final StreamInput in) throws IOException { throw new IllegalStateException("resync replication request serialization is broken in 6.0.0"); } super.readFrom(in); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + trimAboveSeqNo = in.readZLong(); + } else { + trimAboveSeqNo = 
SequenceNumbers.UNASSIGNED_SEQ_NO; + } operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); } @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeZLong(trimAboveSeqNo); + } out.writeArray(Translog.Operation::writeOperation, operations); } @@ -72,12 +88,13 @@ public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final ResyncReplicationRequest that = (ResyncReplicationRequest) o; - return Arrays.equals(operations, that.operations); + return trimAboveSeqNo == that.trimAboveSeqNo + && Arrays.equals(operations, that.operations); } @Override public int hashCode() { - return Arrays.hashCode(operations); + return Long.hashCode(trimAboveSeqNo) + 31 * Arrays.hashCode(operations); } @Override @@ -86,6 +103,7 @@ public String toString() { "shardId=" + shardId + ", timeout=" + timeout + ", index='" + index + '\'' + + ", trimAboveSeqNo=" + trimAboveSeqNo + ", ops=" + operations.length + "}"; } diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 3dd2bd4df580f..78c1e835d4087 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -135,6 +135,9 @@ public static Translog.Location performOnReplica(ResyncReplicationRequest reques } } } + if (request.getTrimAboveSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + replica.trimOperationOfPreviousPrimaryTerms(request.getTrimAboveSeqNo()); + } return location; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 33913ff71141b..2c91ef36bc764 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -236,6 +236,12 @@ boolean isThrottled() { */ public abstract boolean isThrottled(); + /** + * Trims translog for terms below belowTerm and seq# above aboveSeqNo + * @see Translog#trimOperations(long, long) + */ + public abstract void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException; + /** A Lock implementation that always allows the lock to be acquired */ protected static final class NoOpLock implements Lock { @@ -904,7 +910,7 @@ public final boolean refreshNeeded() { * checks and removes translog files that no longer need to be retained. See * {@link org.elasticsearch.index.translog.TranslogDeletionPolicy} for details */ - public abstract void trimTranslog() throws EngineException; + public abstract void trimUnreferencedTranslogFiles() throws EngineException; /** * Tests whether or not the translog generation should be rolled to a new generation. 
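The keep/drop rule behind `trimOperationsFromTranslog` is easy to state: an operation is discarded only when it belongs to a primary term below `belowTerm` and has a seq# above `aboveSeqNo`; both conditions must hold. The following standalone sketch (plain Java with illustrative names, independent of the Elasticsearch classes in this patch) restates that rule, matching the predicate used by the `InMemoryTranslog` test model further down:

["source","java"]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TrimRuleSketch {
    static final class Op {
        final long seqNo;
        final long primaryTerm;
        Op(long seqNo, long primaryTerm) { this.seqNo = seqNo; this.primaryTerm = primaryTerm; }
        @Override public String toString() { return "seq#" + seqNo + "/term" + primaryTerm; }
    }

    // Drop an op only if BOTH conditions hold: older term AND above the trim point.
    static List<Op> trim(List<Op> ops, long belowTerm, long aboveSeqNo) {
        List<Op> kept = new ArrayList<>();
        for (Op op : ops) {
            boolean drop = op.primaryTerm < belowTerm && op.seqNo > aboveSeqNo;
            if (drop == false) {
                kept.add(op);
            }
        }
        return kept;
    }

    public static void main(String[] args) {
        List<Op> ops = Arrays.asList(new Op(0, 1), new Op(1, 1), new Op(2, 1), new Op(2, 2));
        // A new primary in term 2 resyncs with trimAboveSeqNo = 1: the stale
        // seq#2/term1 copy is dropped while the seq#2/term2 copy survives.
        System.out.println(trim(ops, 2, 1)); // [seq#0/term1, seq#1/term1, seq#2/term2]
    }
}
--------------------------------------------------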
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 970af2885f2c1..897a41eddec46 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1601,7 +1601,7 @@ public void rollTranslogGeneration() throws EngineException { } @Override - public void trimTranslog() throws EngineException { + public void trimUnreferencedTranslogFiles() throws EngineException { try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); translog.trimUnreferencedReaders(); @@ -1618,6 +1618,24 @@ public void trimTranslog() throws EngineException { } } + @Override + public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + translog.trimOperations(belowTerm, aboveSeqNo); + } catch (AlreadyClosedException e) { + failOnTragicEvent(e); + throw e; + } catch (Exception e) { + try { + failEngine("translog operations trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to trim translog operations", e); + } + } + private void pruneDeletedTombstones() { /* * We need to deploy two different trimming strategies for GC deletes on primary and replicas. Delete operations on primary diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 0c071f4b2d422..7cffc8c1ac911 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -37,7 +37,7 @@ public class SequenceNumbers { */ public static final long UNASSIGNED_SEQ_NO = -2L; /** - * Represents no operations have been performed on the shard. + * Represents no operations have been performed on the shard. Initial value of a sequence number. 
*/ public static final long NO_OPS_PERFORMED = -1L; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4515cbe87a3a3..9103a9dd27cad 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1022,7 +1022,7 @@ public Engine.CommitId flush(FlushRequest request) { public void trimTranslog() { verifyNotClosed(); final Engine engine = getEngine(); - engine.trimTranslog(); + engine.trimUnreferencedTranslogFiles(); } /** @@ -1225,6 +1225,10 @@ public void prepareForIndexRecovery() { assert currentEngineReference.get() == null; } + public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) { + getEngine().trimOperationsFromTranslog(primaryTerm, aboveSeqNo); + } + public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException { final Engine.Result result; switch (operation.opType()) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index af8c9bdd0272f..8e05e7bf08efa 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.tasks.Task; @@ -84,6 +85,7 @@ public void resync(final IndexShard indexShard, final ActionListener try { final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1; Translog.Snapshot snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo); + final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); resyncListener = new ActionListener() { @Override public void onResponse(final ResyncTask resyncTask) { @@ -135,7 +137,7 @@ public synchronized Translog.Operation next() throws IOException { } }; resync(shardId, indexShard.routingEntry().allocationId().getId(), indexShard.getPrimaryTerm(), wrappedSnapshot, - startingSeqNo, resyncListener); + startingSeqNo, maxSeqNo, resyncListener); } catch (Exception e) { if (resyncListener != null) { resyncListener.onFailure(e); @@ -146,7 +148,7 @@ public synchronized Translog.Operation next() throws IOException { } private void resync(final ShardId shardId, final String primaryAllocationId, final long primaryTerm, final Translog.Snapshot snapshot, - long startingSeqNo, ActionListener listener) { + long startingSeqNo, long maxSeqNo, ActionListener listener) { ResyncRequest request = new ResyncRequest(shardId, primaryAllocationId); ResyncTask resyncTask = (ResyncTask) taskManager.register("transport", "resync", request); // it's not transport :-) ActionListener wrappedListener = new ActionListener() { @@ -166,7 +168,7 @@ public void onFailure(Exception e) { }; try { new SnapshotSender(logger, syncAction, resyncTask, shardId, primaryAllocationId, primaryTerm, snapshot, chunkSize.bytesAsInt(), - startingSeqNo, wrappedListener).run(); + startingSeqNo, maxSeqNo, wrappedListener).run(); } catch (Exception e) { wrappedListener.onFailure(e); } @@ -186,14 +188,16 @@ static 
class SnapshotSender extends AbstractRunnable implements ActionListener listener; + private final AtomicBoolean firstMessage = new AtomicBoolean(true); private final AtomicInteger totalSentOps = new AtomicInteger(); private final AtomicInteger totalSkippedOps = new AtomicInteger(); private AtomicBoolean closed = new AtomicBoolean(); SnapshotSender(Logger logger, SyncAction syncAction, ResyncTask task, ShardId shardId, String primaryAllocationId, long primaryTerm, - Translog.Snapshot snapshot, int chunkSizeInBytes, long startingSeqNo, ActionListener listener) { + Translog.Snapshot snapshot, int chunkSizeInBytes, long startingSeqNo, long maxSeqNo, ActionListener listener) { this.logger = logger; this.syncAction = syncAction; this.task = task; @@ -203,6 +207,7 @@ static class SnapshotSender extends AbstractRunnable implements ActionListenerbelowTerm and seq# above aboveSeqNo. + * Effectively it moves max visible seq# {@link Checkpoint#trimmedAboveSeqNo} therefore {@link TranslogSnapshot} skips those operations. + */ + public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { + assert aboveSeqNo >= SequenceNumbers.NO_OPS_PERFORMED : "aboveSeqNo has to a valid sequence number"; + + try (ReleasableLock lock = writeLock.acquire()) { + ensureOpen(); + if (current.getPrimaryTerm() < belowTerm) { + throw new IllegalArgumentException("Trimming the translog can only be done for terms lower than the current one. " + + "Trim requested for term [ " + belowTerm + " ] , current is [ " + current.getPrimaryTerm() + " ]"); + } + // we assume that the current translog generation doesn't have trimmable ops. Verify that. + assert current.assertNoSeqAbove(belowTerm, aboveSeqNo); + // update all existed ones (if it is necessary) as checkpoint and reader are immutable + final List newReaders = new ArrayList<>(readers.size()); + try { + for (TranslogReader reader : readers) { + final TranslogReader newReader = + reader.getPrimaryTerm() < belowTerm + ? reader.closeIntoTrimmedReader(aboveSeqNo, getChannelFactory()) + : reader; + newReaders.add(newReader); + } + } catch (IOException e) { + IOUtils.closeWhileHandlingException(newReaders); + close(); + throw e; + } + + this.readers.clear(); + this.readers.addAll(newReaders); + } + } /** * Ensures that the given location has be synced / written to the underlying storage. @@ -847,6 +882,13 @@ public interface Snapshot extends Closeable { */ int totalOperations(); + /** + * The number of operations have been skipped (overridden or trimmed) in the snapshot so far. + */ + default int skippedOperations() { + return 0; + } + /** * The number of operations have been overridden (eg. superseded) in the snapshot so far. 
* If two operations have the same sequence number, the operation with a lower term will be overridden by the operation diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 29e30bd25dd37..4091fa45762e1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -21,6 +21,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.Closeable; import java.io.EOFException; @@ -28,8 +30,11 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.index.translog.Translog.getCommitCheckpointFileName; + /** * an immutable translog filereader */ @@ -70,6 +75,39 @@ public static TranslogReader open( return new TranslogReader(checkpoint, channel, path, header); } + /** + * Closes current reader and creates new one with new checkoint and same file channel + */ + TranslogReader closeIntoTrimmedReader(long aboveSeqNo, ChannelFactory channelFactory) throws IOException { + if (closed.compareAndSet(false, true)) { + Closeable toCloseOnFailure = channel; + final TranslogReader newReader; + try { + if (aboveSeqNo < checkpoint.trimmedAboveSeqNo + || aboveSeqNo < checkpoint.maxSeqNo && checkpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { + final Path checkpointFile = path.getParent().resolve(getCommitCheckpointFileName(checkpoint.generation)); + final Checkpoint newCheckpoint = new Checkpoint(checkpoint.offset, checkpoint.numOps, + checkpoint.generation, checkpoint.minSeqNo, checkpoint.maxSeqNo, + checkpoint.globalCheckpoint, checkpoint.minTranslogGeneration, aboveSeqNo); + Checkpoint.write(channelFactory, checkpointFile, newCheckpoint, StandardOpenOption.WRITE); + + IOUtils.fsync(checkpointFile, false); + IOUtils.fsync(checkpointFile.getParent(), true); + + newReader = new TranslogReader(newCheckpoint, channel, path, header); + } else { + newReader = new TranslogReader(checkpoint, channel, path, header); + } + toCloseOnFailure = null; + return newReader; + } finally { + IOUtils.close(toCloseOnFailure); + } + } else { + throw new AlreadyClosedException(toString() + " is already closed"); + } + } + public long sizeInBytes() { return length; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index a966720353297..8fe92bba0097c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.translog; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.EOFException; import java.io.IOException; @@ -32,6 +33,7 @@ final class TranslogSnapshot extends BaseTranslogReader { private final ByteBuffer reusableBuffer; private long position; + private int skippedOperations; private int readOperations; private BufferedChecksumStreamInput reuse; @@ -54,17 +56,24 @@ public int totalOperations() { return totalOperations; } + int 
skippedOperations(){ + return skippedOperations; + } + @Override Checkpoint getCheckpoint() { return checkpoint; } public Translog.Operation next() throws IOException { - if (readOperations < totalOperations) { - return readOperation(); - } else { - return null; + while (readOperations < totalOperations) { + final Translog.Operation operation = readOperation(); + if (operation.seqNo() <= checkpoint.trimmedAboveSeqNo || checkpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { + return operation; + } + skippedOperations++; } + return null; } protected Translog.Operation readOperation() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index cae6578886534..b89b21c52588a 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.OutputStreamDataOutput; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Assertions; import org.elasticsearch.common.bytes.BytesArray; @@ -92,6 +91,7 @@ private TranslogWriter( this.minSeqNo = initialCheckpoint.minSeqNo; assert initialCheckpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED : initialCheckpoint.maxSeqNo; this.maxSeqNo = initialCheckpoint.maxSeqNo; + assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null; } @@ -213,6 +213,25 @@ private synchronized boolean assertNoSeqNumberConflict(long seqNo, BytesReferenc return true; } + synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { + seenSequenceNumbers.entrySet().stream().filter(e -> e.getKey().longValue() > aboveSeqNo) + .forEach(e -> { + final Translog.Operation op; + try { + op = Translog.readOperation(new BufferedChecksumStreamInput(e.getValue().v1().streamInput())); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + long seqNo = op.seqNo(); + long primaryTerm = op.primaryTerm(); + if (primaryTerm < belowTerm) { + throw new AssertionError("current should not have any operations with seq#:primaryTerm [" + + seqNo + ":" + primaryTerm + "] > " + aboveSeqNo + ":" + belowTerm); + } + }); + return true; + } + /** * write all buffered ops to disk and fsync file. 
* @@ -241,7 +260,8 @@ public int totalOperations() { @Override synchronized Checkpoint getCheckpoint() { return new Checkpoint(totalOffset, operationCounter, generation, minSeqNo, maxSeqNo, - globalCheckpointSupplier.getAsLong(), minTranslogGenerationSupplier.getAsLong()); + globalCheckpointSupplier.getAsLong(), minTranslogGenerationSupplier.getAsLong(), + SequenceNumbers.UNASSIGNED_SEQ_NO); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 4c543aeeb22d4..72a6fcb6ba329 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -615,9 +615,9 @@ protected SendSnapshotResult sendSnapshot(final long startingSeqNo, long require cancellableThreads.executeIO(sendBatch); } - assert expectedTotalOps == snapshot.overriddenOperations() + skippedOps + totalSentOps + assert expectedTotalOps == snapshot.skippedOperations() + skippedOps + totalSentOps : String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]", - expectedTotalOps, snapshot.overriddenOperations(), skippedOps, totalSentOps); + expectedTotalOps, snapshot.skippedOperations(), skippedOps, totalSentOps); if (requiredOpsTracker.getCheckpoint() < endingSeqNo) { throw new IllegalStateException("translog replay failed to cover required sequence numbers" + diff --git a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java index 313c8cda1fecd..b25d263a2ab7f 100644 --- a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java @@ -40,7 +40,7 @@ public void testSerialization() throws IOException { final Translog.Index index = new Translog.Index("type", "id", 0, randomNonNegativeLong(), Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, null, -1); final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); - final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, new Translog.Operation[]{index}); + final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, 42L, new Translog.Operation[]{index}); final BytesStreamOutput out = new BytesStreamOutput(); before.writeTo(out); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 736dc40e6867d..018548be9629f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -340,9 +340,10 @@ public void testSeqNoCollision() throws Exception { op1 = snapshot.next(); assertThat(op1, notNullValue()); assertThat(snapshot.next(), nullValue()); - assertThat(snapshot.overriddenOperations(), equalTo(0)); + assertThat(snapshot.skippedOperations(), equalTo(0)); } - // Make sure that replica2 receives translog ops (eg. op2) from replica1 and overwrites its stale operation (op1). + // Make sure that replica2 receives translog ops (eg. op2) from replica1 + // and does not overwrite its stale operation (op1) as it is trimmed. 
logger.info("--> Promote replica1 as the primary"); shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed. shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON)); @@ -353,7 +354,8 @@ public void testSeqNoCollision() throws Exception { assertThat(op2.seqNo(), equalTo(op1.seqNo())); assertThat(op2.primaryTerm(), greaterThan(op1.primaryTerm())); assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations)); - assertThat(snapshot.overriddenOperations(), equalTo(1)); + assertThat(snapshot.overriddenOperations(), equalTo(0)); + assertThat(snapshot.skippedOperations(), equalTo(1)); } // Make sure that peer-recovery transfers all but non-overridden operations. @@ -366,7 +368,7 @@ public void testSeqNoCollision() throws Exception { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); assertThat(snapshot.next(), equalTo(op2)); assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations)); - assertThat("Peer-recovery should not send overridden operations", snapshot.overriddenOperations(), equalTo(0)); + assertThat("Peer-recovery should not send overridden operations", snapshot.skippedOperations(), equalTo(0)); } // TODO: We should assert the content of shards in the ReplicationGroup. // Without rollback replicas(current implementation), we don't have the same content across shards: diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 21be1da3845b6..ee97ba14fe09e 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -53,8 +53,12 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -65,6 +69,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -353,10 +358,19 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.action.resync:TRACE") public void testResyncAfterPrimaryPromotion() throws Exception { - // TODO: check translog trimming functionality once it's implemented - try (ReplicationGroup shards = createGroup(2)) { + // TODO: check translog trimming functionality once rollback is implemented in Lucene (ES trimming is done) + Map mappings = + Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); + try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) { shards.startAll(); - int initialDocs = shards.indexDocs(randomInt(10)); + int initialDocs = randomInt(10); + + for (int i = 0; i < initialDocs; i++) { + final IndexRequest indexRequest = new 
IndexRequest(index.getName(), "type", "initial_doc_" + i) + .source("{ \"f\": \"normal\"}", XContentType.JSON); + shards.index(indexRequest); + } + boolean syncedGlobalCheckPoint = randomBoolean(); if (syncedGlobalCheckPoint) { shards.syncGlobalCheckpoint(); @@ -364,16 +378,30 @@ public void testResyncAfterPrimaryPromotion() throws Exception { final IndexShard oldPrimary = shards.getPrimary(); final IndexShard newPrimary = shards.getReplicas().get(0); + final IndexShard justReplica = shards.getReplicas().get(1); // simulate docs that were inflight when primary failed - final int extraDocs = randomIntBetween(0, 5); + final int extraDocs = randomInt(5); logger.info("--> indexing {} extra docs", extraDocs); for (int i = 0; i < extraDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_" + i) - .source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_doc_" + i) + .source("{ \"f\": \"normal\"}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); indexOnReplica(bulkShardRequest, shards, newPrimary); } + + final int extraDocsToBeTrimmed = randomIntBetween(0, 10); + logger.info("--> indexing {} extra docs to be trimmed", extraDocsToBeTrimmed); + for (int i = 0; i < extraDocsToBeTrimmed; i++) { + final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_trimmed_" + i) + .source("{ \"f\": \"trimmed\"}", XContentType.JSON); + final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); + // have to replicate to another replica != newPrimary one - the subject to trim + indexOnReplica(bulkShardRequest, shards, justReplica); + } + + logger.info("--> seqNo primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats()); + logger.info("--> resyncing replicas"); PrimaryReplicaSyncer.ResyncTask task = shards.promoteReplicaToPrimary(newPrimary).get(); if (syncedGlobalCheckPoint) { @@ -381,7 +409,36 @@ public void testResyncAfterPrimaryPromotion() throws Exception { } else { assertThat(task.getResyncedOperations(), greaterThanOrEqualTo(extraDocs)); } - shards.assertAllEqual(initialDocs + extraDocs); + List replicas = shards.getReplicas(); + + // check all docs on primary are available on replica + Set primaryIds = getShardDocUIDs(newPrimary); + assertThat(primaryIds.size(), equalTo(initialDocs + extraDocs)); + for (IndexShard replica : replicas) { + Set replicaIds = getShardDocUIDs(replica); + Set temp = new HashSet<>(primaryIds); + temp.removeAll(replicaIds); + assertThat(replica.routingEntry() + " is missing docs", temp, empty()); + temp = new HashSet<>(replicaIds); + temp.removeAll(primaryIds); + // yeah, replica has more docs as there is no Lucene roll back on it + assertThat(replica.routingEntry() + " has to have extra docs", temp, + extraDocsToBeTrimmed > 0 ? 
not(empty()) : empty()); + } + + // check translog on replica is trimmed + int translogOperations = 0; + try(Translog.Snapshot snapshot = getTranslog(justReplica).newSnapshot()) { + Translog.Operation next; + while ((next = snapshot.next()) != null) { + translogOperations++; + assertThat("unexpected op: " + next, (int)next.seqNo(), lessThan(initialDocs + extraDocs)); + assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(), is(oldPrimary.getPrimaryTerm())); + final Translog.Source source = next.getSource(); + assertThat(source.source.utf8ToString(), is("{ \"f\": \"normal\"}")); + } + } + assertThat(translogOperations, is(initialDocs + extraDocs)); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index 1257aea3d14fa..b290f4d45597b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.resync.ResyncReplicationRequest; import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -36,15 +37,20 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.tasks.TaskManager; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsInstanceOf.instanceOf; public class PrimaryReplicaSyncerTests extends IndexShardTestCase { @@ -53,15 +59,17 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { IndexShard shard = newStartedShard(true); TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); AtomicBoolean syncActionCalled = new AtomicBoolean(); + List resyncRequests = new ArrayList<>(); PrimaryReplicaSyncer.SyncAction syncAction = (request, parentTask, allocationId, primaryTerm, listener) -> { logger.info("Sending off {} operations", request.getOperations().length); syncActionCalled.set(true); + resyncRequests.add(request); assertThat(parentTask, instanceOf(PrimaryReplicaSyncer.ResyncTask.class)); listener.onResponse(new ResyncReplicationResponse()); }; PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, taskManager, syncAction); - syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 100))); + syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10))); int numDocs = randomInt(10); for (int i = 0; i < numDocs; i++) { @@ -72,7 +80,7 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { } long globalCheckPoint = numDocs > 0 ? 
randomIntBetween(0, numDocs - 1) : 0; - boolean syncNeeded = numDocs > 0 && globalCheckPoint < numDocs - 1; + boolean syncNeeded = numDocs > 0; String allocationId = shard.routingEntry().allocationId().getId(); shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), @@ -84,19 +92,29 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { PlainActionFuture fut = new PlainActionFuture<>(); syncer.resync(shard, fut); - fut.get(); + PrimaryReplicaSyncer.ResyncTask resyncTask = fut.get(); if (syncNeeded) { assertTrue("Sync action was not called", syncActionCalled.get()); + ResyncReplicationRequest resyncRequest = resyncRequests.remove(0); + assertThat(resyncRequest.getTrimAboveSeqNo(), equalTo(numDocs - 1L)); + + assertThat("trimAboveSeqNo has to be specified in request #0 only", resyncRequests.stream() + .mapToLong(ResyncReplicationRequest::getTrimAboveSeqNo) + .filter(seqNo -> seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) + .findFirst() + .isPresent(), + is(false)); } - assertEquals(globalCheckPoint == numDocs - 1 ? 0 : numDocs, fut.get().getTotalOperations()); - if (syncNeeded) { + + assertEquals(globalCheckPoint == numDocs - 1 ? 0 : numDocs, resyncTask.getTotalOperations()); + if (syncNeeded && globalCheckPoint < numDocs - 1) { long skippedOps = globalCheckPoint + 1; // everything up to global checkpoint included - assertEquals(skippedOps, fut.get().getSkippedOperations()); - assertEquals(numDocs - skippedOps, fut.get().getResyncedOperations()); + assertEquals(skippedOps, resyncTask.getSkippedOperations()); + assertEquals(numDocs - skippedOps, resyncTask.getResyncedOperations()); } else { - assertEquals(0, fut.get().getSkippedOperations()); - assertEquals(0, fut.get().getResyncedOperations()); + assertEquals(0, resyncTask.getSkippedOperations()); + assertEquals(0, resyncTask.getResyncedOperations()); } closeShards(shard); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index 0dc404767de3c..99e21d4760463 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -118,7 +118,8 @@ private void checkFailsToOpen(String file, Class expect assertThat("test file [" + translogFile + "] should exist", Files.exists(translogFile), equalTo(true)); final E error = expectThrows(expectedErrorType, () -> { final Checkpoint checkpoint = new Checkpoint(Files.size(translogFile), 1, 1, - SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, 1); + SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED, 1, SequenceNumbers.NO_OPS_PERFORMED); try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { TranslogReader.open(channel, translogFile, checkpoint, null); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 15d423c02fac8..9571ceafc5ecc 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -107,7 +107,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import 
java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.LongStream; import java.util.stream.Stream; @@ -120,8 +122,11 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.stub; @@ -1474,8 +1479,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3135, " + - "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " + - "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage()); + "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} but got: Checkpoint{offset=0, numOps=0, " + + "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage()); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { @@ -1507,6 +1512,191 @@ public void testSnapshotFromStreamInput() throws IOException { assertEquals(ops, readOperations); } + public void testSnapshotCurrentHasUnexpectedOperationsForTrimmedOperations() throws Exception { + int extraDocs = randomIntBetween(10, 15); + + // increment primaryTerm to avoid potential negative numbers + primaryTerm.addAndGet(extraDocs); + translog.rollGeneration(); + + for (int op = 0; op < extraDocs; op++) { + String ascii = randomAlphaOfLengthBetween(1, 50); + Translog.Index operation = new Translog.Index("test", "" + op, op, primaryTerm.get() - op, + ascii.getBytes("UTF-8")); + translog.add(operation); + } + + AssertionError error = expectThrows(AssertionError.class, () -> translog.trimOperations(primaryTerm.get(), 0)); + assertThat(error.getMessage(), is("current should not have any operations with seq#:primaryTerm " + + "[1:" + (primaryTerm.get() - 1) + "] > 0:" + primaryTerm.get())); + + primaryTerm.incrementAndGet(); + translog.rollGeneration(); + + // add a single operation to current with seq# > trimmed seq# but higher primary term + Translog.Index operation = new Translog.Index("test", "" + 1, 1L, primaryTerm.get(), + randomAlphaOfLengthBetween(1, 50).getBytes("UTF-8")); + translog.add(operation); + + // it is possible to trim after generation rollover + translog.trimOperations(primaryTerm.get(), 0); + } + + public void testSnapshotTrimmedOperations() throws Exception { + final InMemoryTranslog inMemoryTranslog = new InMemoryTranslog(); + final List allOperations = new ArrayList<>(); + + for(int attempt = 0, maxAttempts = randomIntBetween(3, 10); attempt < maxAttempts; attempt++) { + List ops = 
LongStream.range(0, allOperations.size() + randomIntBetween(10, 15)) + .boxed().collect(Collectors.toList()); + Randomness.shuffle(ops); + + AtomicReference source = new AtomicReference<>(); + for (final long op : ops) { + source.set(randomAlphaOfLengthBetween(1, 50)); + + // have to use exactly the same source for same seq# if primaryTerm is not changed + if (primaryTerm.get() == translog.getCurrent().getPrimaryTerm()) { + // use the latest source of op with the same seq# - therefore no break + allOperations + .stream() + .filter(allOp -> allOp instanceof Translog.Index && allOp.seqNo() == op) + .map(allOp -> ((Translog.Index)allOp).source().utf8ToString()) + .reduce((a, b) -> b) + .ifPresent(source::set); + } + + // use ongoing primaryTerms - or the same as it was + Translog.Index operation = new Translog.Index("test", "" + op, op, primaryTerm.get(), + source.get().getBytes("UTF-8")); + translog.add(operation); + inMemoryTranslog.add(operation); + allOperations.add(operation); + } + + if (randomBoolean()) { + primaryTerm.incrementAndGet(); + translog.rollGeneration(); + } + + long maxTrimmedSeqNo = randomInt(allOperations.size()); + + translog.trimOperations(primaryTerm.get(), maxTrimmedSeqNo); + inMemoryTranslog.trimOperations(primaryTerm.get(), maxTrimmedSeqNo); + translog.sync(); + + Collection effectiveOperations = inMemoryTranslog.operations(); + + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, containsOperationsInAnyOrder(effectiveOperations)); + assertThat(snapshot.totalOperations(), is(allOperations.size())); + assertThat(snapshot.skippedOperations(), is(allOperations.size() - effectiveOperations.size())); + } + } + } + + /** + * this class mimic behaviour of original {@link Translog} + */ + static class InMemoryTranslog { + private final Map operations = new HashMap<>(); + + void add(Translog.Operation operation) { + final Translog.Operation old = operations.put(operation.seqNo(), operation); + assert old == null || old.primaryTerm() <= operation.primaryTerm(); + } + + void trimOperations(long belowTerm, long aboveSeqNo) { + for (final Iterator> it = operations.entrySet().iterator(); it.hasNext(); ) { + final Map.Entry next = it.next(); + Translog.Operation op = next.getValue(); + boolean drop = op.primaryTerm() < belowTerm && op.seqNo() > aboveSeqNo; + if (drop) { + it.remove(); + } + } + } + + Collection operations() { + return operations.values(); + } + } + + public void testRandomExceptionsOnTrimOperations( ) throws Exception { + Path tempDir = createTempDir(); + final FailSwitch fail = new FailSwitch(); + fail.failNever(); + TranslogConfig config = getTranslogConfig(tempDir); + List fileChannels = new ArrayList<>(); + final Translog failableTLog = + getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy(), fileChannels); + + IOException expectedException = null; + int translogOperations = 0; + final int maxAttempts = 10; + for(int attempt = 0; attempt < maxAttempts; attempt++) { + int maxTrimmedSeqNo; + fail.failNever(); + int extraTranslogOperations = randomIntBetween(10, 100); + + List ops = IntStream.range(translogOperations, translogOperations + extraTranslogOperations) + .boxed().collect(Collectors.toList()); + Randomness.shuffle(ops); + for (int op : ops) { + String ascii = randomAlphaOfLengthBetween(1, 50); + Translog.Index operation = new Translog.Index("test", "" + op, op, + primaryTerm.get(), ascii.getBytes("UTF-8")); + + failableTLog.add(operation); + } + + translogOperations += 
extraTranslogOperations; + + // at least one roll + inc of primary term has to be there - otherwise trim would not take place at all + // last attempt we have to make roll as well - otherwise could skip trimming as it has been trimmed already + boolean rollover = attempt == 0 || attempt == maxAttempts - 1 || randomBoolean(); + if (rollover) { + primaryTerm.incrementAndGet(); + failableTLog.rollGeneration(); + } + + maxTrimmedSeqNo = rollover ? translogOperations - randomIntBetween(4, 8) : translogOperations + 1; + + // if we are so happy to reach the max attempts - fail it always` + fail.failRate(attempt < maxAttempts - 1 ? 25 : 100); + try { + failableTLog.trimOperations(primaryTerm.get(), maxTrimmedSeqNo); + } catch (IOException e){ + expectedException = e; + break; + } + } + + assertThat(expectedException, is(not(nullValue()))); + + assertThat(fileChannels, is(not(empty()))); + assertThat("all file channels have to be closed", + fileChannels.stream().filter(f -> f.isOpen()).findFirst().isPresent(), is(false)); + + assertThat(failableTLog.isOpen(), is(false)); + final AlreadyClosedException alreadyClosedException = expectThrows(AlreadyClosedException.class, () -> failableTLog.newSnapshot()); + assertThat(alreadyClosedException.getMessage(), + is("translog is already closed")); + + fail.failNever(); + + // check that despite of IO exception translog is not corrupted + try(Translog reopenedTranslog = openTranslog(config, failableTLog.getTranslogUUID())) { + try (Translog.Snapshot snapshot = reopenedTranslog.newSnapshot()) { + assertThat(snapshot.totalOperations(), greaterThan(0)); + Translog.Operation operation; + for (int i = 0; (operation = snapshot.next()) != null; i++) { + assertNotNull("operation " + i + " must be non-null", operation); + } + } + } + } + public void testLocationHashCodeEquals() throws IOException { List locations = new ArrayList<>(); List locations2 = new ArrayList<>(); @@ -2007,7 +2197,8 @@ private static class FailSwitch { private volatile boolean onceFailedFailAlways = false; public boolean fail() { - boolean fail = randomIntBetween(1, 100) <= failRate; + final int rnd = randomIntBetween(1, 100); + boolean fail = rnd <= failRate; if (fail && onceFailedFailAlways) { failAlways(); } @@ -2026,17 +2217,30 @@ public void failRandomly() { failRate = randomIntBetween(1, 100); } + public void failRate(int rate) { + failRate = rate; + } + public void onceFailedFailAlways() { onceFailedFailAlways = true; } } - private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException, String translogUUID, final TranslogDeletionPolicy deletionPolicy) throws IOException { + return getFailableTranslog(fail, config, partialWrites, throwUnknownException, translogUUID, deletionPolicy, null); + } + + private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites, + final boolean throwUnknownException, String translogUUID, + final TranslogDeletionPolicy deletionPolicy, + final List fileChannels) throws IOException { final ChannelFactory channelFactory = (file, openOption) -> { FileChannel channel = FileChannel.open(file, openOption); + if (fileChannels != null) { + fileChannels.add(channel); + } boolean success = false; try { final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation @@ -2393,7 +2597,7 @@ private Checkpoint 
randomCheckpoint() { } final long generation = randomNonNegativeLong(); return new Checkpoint(randomLong(), randomInt(), generation, minSeqNo, maxSeqNo, randomNonNegativeLong(), - randomLongBetween(1, generation)); + randomLongBetween(1, generation), maxSeqNo); } public void testCheckpointOnDiskFull() throws IOException { @@ -2634,7 +2838,7 @@ public void testMinSeqNoBasedAPI() throws IOException { assertThat(Tuple.tuple(op.seqNo(), op.primaryTerm()), isIn(seenSeqNos)); readFromSnapshot++; } - readFromSnapshot += snapshot.overriddenOperations(); + readFromSnapshot += snapshot.skippedOperations(); } assertThat(readFromSnapshot, equalTo(expectedSnapshotOps)); final long seqNoLowerBound = seqNo; From c5d38adc14bce1f81c8f3ab65f400c6eadbba412 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 7 Jun 2018 17:14:36 -0400 Subject: [PATCH 12/24] QA: Fix rolling restart tests some more The condition in the watcher test wasn't quite right when upgrading from 5.x. The token test seems to dislike upgrading from 5.6.10 in a mixed cluster. I'm going to mute it carefully and hand the issue off to some folks closer to the code. --- x-pack/qa/rolling-upgrade/build.gradle | 4 ++++ .../upgrades/TokenBackwardsCompatibilityIT.java | 4 +++- .../upgrades/WatchBackwardsCompatibilityIT.java | 15 ++++++++++----- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 0e1fb90ec7e81..e53c34f42e042 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -177,6 +177,7 @@ subprojects { Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') } Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> @@ -227,6 +228,7 @@ subprojects { oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') // We only need to run these tests once so we may as well do it when we're two thirds upgraded systemProperty 'tests.rest.blacklist', [ 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', @@ -246,6 +248,7 @@ subprojects { twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } @@ -258,6 +261,7 @@ subprojects { Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') /* * Force stopping all the upgraded nodes after the test runner * so they are alive during the test. 
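The tests.upgrade_from_version property wired into all four test runners above carries the version the cluster is upgrading from (with the -SNAPSHOT suffix stripped) into the REST test JVMs. The two test diffs below consume it with the same pattern; in isolation it looks roughly like this (a sketch using the property name from the build changes above):

    // Skip a rolling-upgrade test when the cluster started from a pre-6.0 version.
    Version upgradeFromVersion = Version.fromString(System.getProperty("tests.upgrade_from_version"));
    assumeFalse("not supported when upgrading from before 6.0.0",
            upgradeFromVersion.before(Version.V_6_0_0));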
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java
index c2b9f3db800c9..2ba388e9852dc 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java
@@ -69,12 +69,14 @@ public void testTokenWorksInMixedOrUpgradedCluster() throws Exception {
         assertTokenWorks((String) source.get("token"));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31195")
     public void testMixedCluster() throws Exception {
         assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED);
         assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion());
         assumeFalse("can't be run twice because it invalidates a token so we skip the first attempt",
                 Booleans.parseBoolean(System.getProperty("tests.first_round")));
+        Version upgradeFromVersion = Version.fromString(System.getProperty("tests.upgrade_from_version"));
+        assumeFalse("this test fails for unknown reasons when upgrading from before 6.0.0",
+                upgradeFromVersion.before(Version.V_6_0_0));
 
         Response getResponse = client().performRequest("GET",
                 "token_backwards_compatibility_it/doc/old_cluster_token2");
         assertOK(getResponse);
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java
index 64c612e65d7ab..be3b0525d06bf 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatchBackwardsCompatibilityIT.java
@@ -177,7 +177,6 @@ public void testWatcherStats() throws Exception {
         );
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31196")
     public void testWatcherRestart() throws Exception {
         executeUpgradeIfNeeded();
 
@@ -186,7 +185,7 @@ public void testWatcherRestart() throws Exception {
         executeAgainstRandomNode(client -> assertOK(client.performRequest("POST", "/_xpack/watcher/_start")));
 
         // Watcher should be started on at least the nodes with the new version.
- ensureWatcherStartedOnModernNodes(); + ensureWatcherStartedOnExpectedNodes(); } public void testWatchCrudApis() throws Exception { @@ -316,7 +315,7 @@ private void ensureWatcherStarted() throws Exception { })); } - private void ensureWatcherStartedOnModernNodes() throws Exception { + private void ensureWatcherStartedOnExpectedNodes() throws Exception { if (nodes.getMaster().getVersion().before(Version.V_6_0_0)) { /* * Versions before 6.0 ran watcher on the master node and the @@ -331,11 +330,17 @@ private void ensureWatcherStartedOnModernNodes() throws Exception { Map stats = ((List) responseBody.get("stats")).stream() .map(o -> (Map) o) .collect(Collectors.toMap(m -> m.get("node_id"), Function.identity())); - assertNotNull("no stats yet", stats); + if (nodes.getBWCVersion().before(Version.V_6_0_0)) { + Map nodeStats = (Map) stats.get(nodes.getMaster().getId()); + // If the old version is before 6.0 then only the master is allowed to start + assertEquals("master node [" + nodes.getMaster().getId() + "] is not started", + "started", nodeStats.get("watcher_state")); + return; + } for (Node node : nodes.getNewNodes()) { Map nodeStats = (Map) stats.get(node.getId()); assertEquals("modern node [" + node.getId() + "] is not started", - nodeStats.get("watcher_state"), "started"); + "started", nodeStats.get("watcher_state")); } })); } From 2a5087c40799b8ea2c221dde87795337ba0344da Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 8 Jun 2018 10:15:28 -0400 Subject: [PATCH 13/24] SQL: Make a single JDBC driver jar (#31012) Replaces zip archive containing multiple jars with a single JDBC driver jar that shades all external dependencies. Closes #29856 --- x-pack/plugin/sql/jdbc/build.gradle | 125 +++++++++++++--------------- x-pack/qa/sql/build.gradle | 6 +- 2 files changed, 62 insertions(+), 69 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 26cf913aa2790..e383e71cd4c76 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -1,74 +1,40 @@ + +buildscript { + repositories { + maven { + url 'https://plugins.gradle.org/m2/' + } + } + dependencies { + classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.2' + } +} + apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' +apply plugin: 'com.github.johnrengelman.shadow' description = 'JDBC driver for Elasticsearch' +archivesBaseName = "x-pack-sql-jdbc" forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] } -/* - * Bundle as many of our dependencies as we can get away with into the jar. - * We can't currently bundle *all* dependencies into the jar, but we'd like - * to avoid publishing the sql shared libraries if possible. This allows that. - * - * It is possible to use configure this bundling in a bunch of different ways - * but this particular way generates a pom that doesn't declare the bundled - * dependencies as dependencies. Which is a good thing because we don't publish - * them and we don't want consumers to get two copies of them. - * - * We'd *like* to shade these dependencies, at least ones like jackson which we - * know that we can't remove entirely. But for now something like this is - * simpler. 
- */
-configurations {
-    bundled
-}
-sourceSets {
-    main {
-        compileClasspath += configurations.bundled
-    }
-    test {
-        compileClasspath += configurations.bundled
-    }
-}
-javadoc {
-    classpath += configurations.bundled
-}
-jar {
-    from({configurations.bundled.collect { it.isDirectory() ? it : zipTree(it) }}) {
-        // We don't need the META-INF from the things we bundle. For now.
-        exclude 'META-INF/*'
-    }
-}
-
 dependencies {
-
-    // Eclipse doesn't know how to deal with these bundled deependencies so make them compile
-    // dependencies if we are running in Eclipse
-    if (isEclipse) {
-        compile (xpackProject('plugin:sql:sql-shared-client')) {
-            transitive = false
-        }
-        compile (xpackProject('plugin:sql:sql-shared-proto')) {
-            transitive = false
-        }
-    } else {
-        bundled (xpackProject('plugin:sql:sql-shared-client')) {
-            transitive = false
-        }
-        bundled (xpackProject('plugin:sql:sql-shared-proto')) {
-            transitive = false
-        }
+    compile (xpackProject('plugin:sql:sql-shared-client')) {
+        transitive = false
+    }
+    compile (xpackProject('plugin:sql:sql-shared-proto')) {
+        transitive = false
     }
     compile (project(':libs:x-content')) {
         transitive = false
     }
     compile project(':libs:core')
     runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
-
     testCompile "org.elasticsearch.test:framework:${version}"
 }
@@ -82,23 +48,48 @@ dependencyLicenses {
     ignoreSha 'elasticsearch'
 }
 
-/*
- * Temporary zip file to make the jdbc driver more usable during the 6.3
- * release. We'd like to remove this in future releases when the jdbc driver
- * bundles or shades all of its dependencies. But for now this should help
- * non-maven jdbc users, specifically those folks using BI tools.
- */
-task zipWithDependencies(type: Zip) {
-    from configurations.runtime
-    from configurations.runtime.artifacts.files
-    baseName 'elasticsearch-jdbc-with-dependencies'
-    into "elasticsearch-jdbc-with-dependencies-$version"
+shadowJar {
+    classifier = null
+    relocate 'com.fasterxml', 'org.elasticsearch.fasterxml'
+}
+
+// We don't need the normal jar; we use the shadow jar instead
+jar.enabled = false
+
+// We need a no-dependencies jar though for qa testing so it doesn't conflict with the cli
+configurations {
+    nodeps
 }
-assemble.dependsOn zipWithDependencies
+
+task nodepsJar(type: Jar) {
+    appendix 'nodeps'
+    from sourceSets.main.output
 }
+
+artifacts {
+    nodeps nodepsJar
+    archives shadowJar
+}
+
+publishing {
+    publications {
+        nebula(MavenPublication) {
+            artifact shadowJar
+            pom.withXml {
+                // Nebula is mistakenly including all dependencies that are already shadowed into the shadow jar
+                asNode().remove(asNode().dependencies)
+            }
+        }
+    }
+}
+
+assemble.dependsOn shadowJar
 
 // Use the jar for testing so the tests are more "real"
 test {
     classpath -= compileJava.outputs.files
-    classpath += jar.outputs.files
-    dependsOn jar
+    classpath -= configurations.compile
+    classpath -= configurations.runtime
+    classpath += shadowJar.outputs.files
+    dependsOn shadowJar
 }
diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle
index 8f77e1608d6d0..a3c147bbc04fc 100644
--- a/x-pack/qa/sql/build.gradle
+++ b/x-pack/qa/sql/build.gradle
@@ -9,7 +9,7 @@ dependencies {
     compile "org.elasticsearch.test:framework:${version}"
 
     // JDBC testing dependencies
-    compile xpackProject('plugin:sql:jdbc')
+    compile project(path: xpackModule('sql:jdbc'), configuration: 'nodeps')
     compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34"
 
     // CLI testing dependencies
@@ -87,7 +87,9 @@ subprojects {
     // JDBC testing dependencies
     testRuntime "net.sourceforge.csvjdbc:csvjdbc:1.0.34"
     testRuntime "com.h2database:h2:1.4.197"
-    testRuntime xpackProject('plugin:sql:jdbc')
+    testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps')
+    testRuntime xpackProject('plugin:sql:sql-shared-client')
+
     // TODO check if needed
     testRuntime("org.antlr:antlr4-runtime:4.5.3") {
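With everything shaded into a single artifact, a BI tool or standalone application now needs only the one driver jar on its classpath. A smoke test against the shaded driver could look roughly like this (a sketch: the jdbc:es:// URL, the port, and automatic DriverManager registration are illustrative assumptions, not taken from this patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class JdbcSmokeTest {
        public static void main(String[] args) throws Exception {
            // Assumes an Elasticsearch node with the SQL plugin listening on
            // localhost:9200 and a driver that registers itself with DriverManager.
            try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200");
                 Statement st = con.createStatement();
                 ResultSet rs = st.executeQuery("SELECT 1")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            }
        }
    }

The relocate directive above is what makes the single jar safe to drop into arbitrary applications: the bundled Jackson classes move under org.elasticsearch.fasterxml, so they can never clash with an application's own Jackson version.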
"net.sourceforge.csvjdbc:csvjdbc:1.0.34" testRuntime "com.h2database:h2:1.4.197" - testRuntime xpackProject('plugin:sql:jdbc') + testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') + testRuntime xpackProject('plugin:sql:sql-shared-client') + // TODO check if needed testRuntime("org.antlr:antlr4-runtime:4.5.3") { From 4b7025c9a58f324f4d9cf246f15ff0546e329138 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 8 Jun 2018 09:23:46 -0700 Subject: [PATCH 14/24] Remove extraneous references to 'tokenized' in the mapper code. (#31010) These are likely left over from when there were three options for the index mapping ('no', 'analyzed', 'not_analyzed'). (cherry picked from commit 2378fa19b8cf31399434815d067765917825924a) --- .../index/mapper/BooleanFieldMapper.java | 8 -------- .../elasticsearch/index/mapper/FieldMapper.java | 15 ++------------- .../index/mapper/MappedFieldType.java | 2 +- .../index/mapper/MultiFieldTests.java | 2 +- 4 files changed, 4 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 45cd9e17ad119..2bbb9c23a25d5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -84,14 +84,6 @@ public Builder(String name) { this.builder = this; } - @Override - public Builder tokenized(boolean tokenized) { - if (tokenized) { - throw new IllegalArgumentException("bool field can't be tokenized"); - } - return super.tokenized(tokenized); - } - @Override public BooleanFieldMapper build(BuilderContext context) { setupFieldType(context); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 22c25f9ccdb80..50776b4d75bf2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -155,11 +155,6 @@ public T storeTermVectorPayloads(boolean termVectorPayloads) { return builder; } - public T tokenized(boolean tokenized) { - this.fieldType.setTokenized(tokenized); - return builder; - } - public T boost(float boost) { this.fieldType.setBoost(boost); return builder; @@ -400,9 +395,8 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, boolean indexed = fieldType().indexOptions() != IndexOptions.NONE; boolean defaultIndexed = defaultFieldType.indexOptions() != IndexOptions.NONE; - if (includeDefaults || indexed != defaultIndexed || - fieldType().tokenized() != defaultFieldType.tokenized()) { - builder.field("index", indexTokenizeOption(indexed, fieldType().tokenized())); + if (includeDefaults || indexed != defaultIndexed) { + builder.field("index", indexed); } if (includeDefaults || fieldType().stored() != defaultFieldType.stored()) { builder.field("store", fieldType().stored()); @@ -498,11 +492,6 @@ public static String termVectorOptionsToString(FieldType fieldType) { } } - /* Only protected so that string can override it */ - protected Object indexTokenizeOption(boolean indexed, boolean tokenized) { - return indexed; - } - protected abstract String contentType(); public static class MultiFields { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 
index 571c49be11bb6..e21a8a0f29791 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
@@ -168,7 +168,7 @@ public void checkCompatibility(MappedFieldType other, List<String> conflicts, bo
         boolean indexed = indexOptions() != IndexOptions.NONE;
         boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
         // TODO: should be validating if index options go "up" (but "down" is ok)
-        if (indexed != mergeWithIndexed || tokenized() != other.tokenized()) {
+        if (indexed != mergeWithIndexed) {
             conflicts.add("mapper [" + name() + "] has different [index] values");
         }
         if (stored() != other.stored()) {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
index 59c3825acc643..b80c9e17b48ad 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
@@ -120,7 +120,7 @@ public void testBuildThenParse() throws Exception {
         DocumentMapper builderDocMapper = new DocumentMapper.Builder(new RootObjectMapper.Builder("person").add(
                 new TextFieldMapper.Builder("name").store(true)
-                        .addMultiField(new TextFieldMapper.Builder("indexed").index(true).tokenized(true))
+                        .addMultiField(new TextFieldMapper.Builder("indexed").index(true))
                         .addMultiField(new TextFieldMapper.Builder("not_indexed").index(false).store(true))
         ), indexService.mapperService()).build(indexService.mapperService());
 
From 778ab99d83b9a098f64069a62ee92d1612674efb Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Fri, 8 Jun 2018 09:24:09 -0700
Subject: [PATCH 15/24] Remove DocumentFieldMappers#smartNameFieldMapper, as it
 is no longer needed. (#31018)

(cherry picked from commit 8f607071b6e34990d4e42f235d014d02cd680e8b)
---
 .../index/mapper/TokenCountFieldMapperTests.java   |  4 ++--
 .../get/TransportGetFieldMappingsIndexAction.java  |  2 +-
 .../elasticsearch/index/get/ShardGetService.java   |  2 +-
 .../index/mapper/DocumentFieldMappers.java         | 13 -------------
 .../index/search/QueryParserHelper.java            |  2 +-
 .../index/mapper/BinaryFieldMapperTests.java       |  4 ++--
 .../index/mapper/DocumentMapperMergeTests.java     |  8 ++++----
 .../index/mapper/DoubleIndexingDocTests.java       | 14 +++++++-------
 .../index/mapper/DynamicMappingTests.java          | 14 +++++++-------
 .../index/mapper/DynamicTemplatesTests.java        |  8 ++++----
 10 files changed, 29 insertions(+), 42 deletions(-)
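Every call-site change in this patch is the same one-line migration; in sketch form (the surrounding variables are illustrative, the two method names come from the diffs below):

    // Before, via the removed convenience lookup:
    FieldMapper before = docMapper.mappers().smartNameFieldMapper("field");
    // After, via the plain name-based lookup that remains:
    FieldMapper after = docMapper.mappers().getMapper("field");

The removed method only added a fallback that scanned every mapper for a matching field-type name when the direct lookup missed; since no caller relied on that fallback any more, getMapper is sufficient.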
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
index 503eaab3d8d0e..128829c5253e4 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
@@ -79,9 +79,9 @@ public void testMerge() throws IOException {
                 new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
 
         // previous mapper has not been modified
-        assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));
+        assertThat(((TokenCountFieldMapper) stage1.mappers().getMapper("tc")).analyzer(), equalTo("keyword"));
 
         // but the new one has the change
-        assertThat(((TokenCountFieldMapper) stage2.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard"));
+        assertThat(((TokenCountFieldMapper) stage2.mappers().getMapper("tc")).analyzer(), equalTo("standard"));
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
index e9551e6e69d01..a09b00b59bf12 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
@@ -187,7 +187,7 @@ private static Map findFieldMappingsByType(Predica
                 }
             } else {
                 // not a pattern
-                FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field);
+                FieldMapper fieldMapper = allFieldMappers.getMapper(field);
                 if (fieldMapper != null) {
                     addFieldMapper(fieldPredicate, field, fieldMapper, fieldMappings, request.includeDefaults());
                 } else if (request.probablySingleFieldRequest()) {
diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
index a6c8dbf53b395..41b52154c2455 100644
--- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
+++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
@@ -218,7 +218,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[]
 
         if (gFields != null && gFields.length > 0) {
             for (String field : gFields) {
-                FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
+                FieldMapper fieldMapper = docMapper.mappers().getMapper(field);
                 if (fieldMapper == null) {
                     if (docMapper.objectMappers().get(field) != null) {
                         // Only fail if we 
know it is a object field, missing paths / fields shouldn't fail. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java index 2a19cb3f8bd40..ea242aca68f44 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -80,19 +80,6 @@ public Collection simpleMatchToFullName(String pattern) { return fields; } - public FieldMapper smartNameFieldMapper(String name) { - FieldMapper fieldMapper = getMapper(name); - if (fieldMapper != null) { - return fieldMapper; - } - for (FieldMapper otherFieldMapper : this) { - if (otherFieldMapper.fieldType().name().equals(name)) { - return otherFieldMapper; - } - } - return null; - } - /** * A smart analyzer used for indexing that takes into account specific analyzers configured * per {@link FieldMapper}. diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index 0c48898dda7a1..995dec4a3c1bd 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -90,7 +90,7 @@ public static Map parseFieldsAndWeights(List fields) { */ public static FieldMapper getFieldMapper(MapperService mapperService, String field) { for (DocumentMapper mapper : mapperService.docMappers(true)) { - FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper(field); + FieldMapper fieldMapper = mapper.mappers().getMapper(field); if (fieldMapper != null) { return fieldMapper; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java index e4cd5731daafa..6e9cb6c0b5980 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java @@ -59,7 +59,7 @@ public void testDefaultMapping() throws Exception { DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field"); + FieldMapper fieldMapper = mapper.mappers().getMapper("field"); assertThat(fieldMapper, instanceOf(BinaryFieldMapper.class)); assertThat(fieldMapper.fieldType().stored(), equalTo(false)); } @@ -94,7 +94,7 @@ public void testStoredValue() throws IOException { XContentType.JSON)); BytesRef indexedValue = doc.rootDoc().getBinaryValue("field"); assertEquals(new BytesRef(value), indexedValue); - FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field"); + FieldMapper fieldMapper = mapper.mappers().getMapper("field"); Object originalValue = fieldMapper.fieldType().valueForDisplay(indexedValue); assertEquals(new BytesArray(value), originalValue); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java index ae2432301b27a..4b1d05cbbefed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java @@ -56,11 +56,11 @@ public void test1Merge() throws Exception { 
DocumentMapper merged = stage1.merge(stage2.mapping(), false); // stage1 mapping should not have been modified - assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); - assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); + assertThat(stage1.mappers().getMapper("age"), nullValue()); + assertThat(stage1.mappers().getMapper("obj1.prop1"), nullValue()); // but merged should - assertThat(merged.mappers().smartNameFieldMapper("age"), notNullValue()); - assertThat(merged.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue()); + assertThat(merged.mappers().getMapper("age"), notNullValue()); + assertThat(merged.mappers().getMapper("obj1.prop1"), notNullValue()); } public void testMergeObjectDynamic() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index 6c83f31f93fe6..c50320900923c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -69,25 +69,25 @@ public void testDoubleIndexingSameDoc() throws Exception { IndexReader reader = DirectoryReader.open(writer); IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10); + TopDocs topDocs = searcher.search(mapper.mappers().getMapper("field1").fieldType().termQuery("value1", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field2").fieldType().termQuery("1", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field3").fieldType().termQuery("1.1", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field4").fieldType().termQuery("2010-01-01", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field5").fieldType().termQuery("1", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field5").fieldType().termQuery("2", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10); + topDocs = searcher.search(mapper.mappers().getMapper("field5").fieldType().termQuery("3", context), 10); assertThat(topDocs.totalHits, equalTo(2L)); writer.close(); reader.close(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 4a2876d1708bd..2b5b9406d27ee 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -670,10 +670,10 @@ public void testNumericDetectionEnabled() throws Exception { .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); defaultMapper = index.mapperService().documentMapper("type"); - FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long"); + FieldMapper mapper = defaultMapper.mappers().getMapper("s_long"); assertThat(mapper.fieldType().typeName(), equalTo("long")); - mapper = defaultMapper.mappers().smartNameFieldMapper("s_double"); + mapper = defaultMapper.mappers().getMapper("s_double"); assertThat(mapper.fieldType().typeName(), equalTo("float")); } @@ -697,10 +697,10 @@ public void testNumericDetectionDefault() throws Exception { .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get()); defaultMapper = index.mapperService().documentMapper("type"); - FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long"); + FieldMapper mapper = defaultMapper.mappers().getMapper("s_long"); assertThat(mapper, instanceOf(TextFieldMapper.class)); - mapper = defaultMapper.mappers().smartNameFieldMapper("s_double"); + mapper = defaultMapper.mappers().getMapper("s_double"); assertThat(mapper, instanceOf(TextFieldMapper.class)); } @@ -748,9 +748,9 @@ public void testDateDetectionInheritsFormat() throws Exception { defaultMapper = index.mapperService().documentMapper("type"); - DateFieldMapper dateMapper1 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date1"); - DateFieldMapper dateMapper2 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date2"); - DateFieldMapper dateMapper3 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date3"); + DateFieldMapper dateMapper1 = (DateFieldMapper) defaultMapper.mappers().getMapper("date1"); + DateFieldMapper dateMapper2 = (DateFieldMapper) defaultMapper.mappers().getMapper("date2"); + DateFieldMapper dateMapper3 = (DateFieldMapper) defaultMapper.mappers().getMapper("date3"); // inherited from dynamic date format assertEquals("yyyy-MM-dd", dateMapper1.fieldType().dateTimeFormatter().format()); // inherited from dynamic date format since the mapping in the template did not specify a format diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 64927103e6d1d..d8e8c8e0e3da5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -56,11 +56,11 @@ public void testMatchTypeOnly() throws Exception { docMapper = index.mapperService().documentMapper("person"); DocumentFieldMappers mappers = docMapper.mappers(); - assertThat(mappers.smartNameFieldMapper("s"), Matchers.notNullValue()); - assertEquals(IndexOptions.NONE, mappers.smartNameFieldMapper("s").fieldType().indexOptions()); + assertThat(mappers.getMapper("s"), Matchers.notNullValue()); + assertEquals(IndexOptions.NONE, mappers.getMapper("s").fieldType().indexOptions()); - assertThat(mappers.smartNameFieldMapper("l"), Matchers.notNullValue()); - assertNotSame(IndexOptions.NONE, mappers.smartNameFieldMapper("l").fieldType().indexOptions()); + assertThat(mappers.getMapper("l"), Matchers.notNullValue()); + assertNotSame(IndexOptions.NONE, 
mappers.getMapper("l").fieldType().indexOptions()); } From 5844adcd3e60ae13af06a438470396022441f582 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 8 Jun 2018 12:30:49 -0700 Subject: [PATCH 16/24] [DOCS] Splits release notes by major version --- docs/reference/release-notes.asciidoc | 6 +- .../{6.4.0.asciidoc => 6.0.asciidoc} | 1668 ----------------- docs/reference/release-notes/6.1.asciidoc | 794 ++++++++ docs/reference/release-notes/6.2.asciidoc | 812 ++++++++ docs/reference/release-notes/6.3.asciidoc | 113 ++ docs/reference/release-notes/6.4.asciidoc | 89 + 6 files changed, 1813 insertions(+), 1669 deletions(-) rename docs/reference/release-notes/{6.4.0.asciidoc => 6.0.asciidoc} (75%) create mode 100644 docs/reference/release-notes/6.1.asciidoc create mode 100644 docs/reference/release-notes/6.2.asciidoc create mode 100644 docs/reference/release-notes/6.3.asciidoc create mode 100644 docs/reference/release-notes/6.4.asciidoc diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 4c5d79599afca..361bf01ffbd45 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -30,4 +30,8 @@ This section summarizes the changes in each release. -- -include::release-notes/6.4.0.asciidoc[] +include::release-notes/6.4.asciidoc[] +include::release-notes/6.3.asciidoc[] +include::release-notes/6.2.asciidoc[] +include::release-notes/6.1.asciidoc[] +include::release-notes/6.0.asciidoc[] diff --git a/docs/reference/release-notes/6.4.0.asciidoc b/docs/reference/release-notes/6.0.asciidoc similarity index 75% rename from docs/reference/release-notes/6.4.0.asciidoc rename to docs/reference/release-notes/6.0.asciidoc index 4b999e039cf49..ac385b8c30174 100644 --- a/docs/reference/release-notes/6.4.0.asciidoc +++ b/docs/reference/release-notes/6.0.asciidoc @@ -34,1674 +34,6 @@ //=== Known Issues //// -[[release-notes-6.4.0]] -== {es} version 6.4.0 - -coming[6.4.0] - -//[float] -//[[breaking-6.4.0]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -[float] -=== New Features - -The new <> field allows to know which fields -got ignored at index time because of the <> -option. ({pull}30140[#29658]) - -A new analysis plugin called `analysis_nori` that exposes the Lucene Korean -analysis module. 
({pull}30397[#30397]) - -[float] -=== Enhancements - -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255]) - -Geo:: -* Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.3.1]] -== Elasticsearch version 6.3.1 - -coming[6.3.1] - -//[float] -[[breaking-6.3.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) - -Respect accept header on requests with no handler ({pull}30383[#30383]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.3.0]] -== {es} version 6.3.0 - -coming[6.3.0] - -[float] -[[breaking-6.3.0]] -=== Breaking Changes - -[float] -=== Deprecations -Monitoring:: -* By default when you install {xpack}, monitoring is enabled but data collection -is disabled. To enable data collection, use the new -`xpack.monitoring.collection.enabled` setting. You can update this setting by -using the <>. For more -information, see <>. - -Security:: -* The legacy `XPackExtension` extension mechanism has been removed and replaced -with an SPI based extension mechanism that is installed and built as an -elasticsearch plugin. - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -//[float] -//=== Bug Fixes - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.2.4]] -== {es} version 6.2.4 - -//[float] -//[[breaking-6.2.4]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Engine:: -* Harden periodically check to avoid endless flush loop {pull}29125[#29125] (issues: {issue}28350[#28350], {issue}29097[#29097]) - -Ingest:: -* Don't allow referencing the pattern bank name in the pattern bank {pull}29295[#29295] (issue: {issue}29257[#29257]) - -[float] -=== Regressions -Fail snapshot operations early when creating or deleting a snapshot on a repository that has been -written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. 
({pull}30140[#30140]) - -Java High Level REST Client:: -* Bulk processor#awaitClose to close scheduler {pull}29263[#29263] -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) -Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332]) - -Java Low Level REST Client:: -* REST client: hosts marked dead for the first time should not be immediately retried {pull}29230[#29230] - -Machine Learning:: -* Prevents failed jobs from mistakenly re-opening after node loss recovery. -* Returns an error when an operation cannot be submitted because the process was -killed. -* Respects the datafeed frequency when it is less or equal to the -`query_delay` setting. - -Network:: -* Cross-cluster search and default connections can get crossed [OPEN] [ISSUE] {pull}29321[#29321] - -Percolator:: -* Fixed bug when non percolator docs end up in the search hits {pull}29447[#29447] (issue: {issue}29429[#29429]) -* Fixed a msm accounting error that can occur during analyzing a percolator query {pull}29415[#29415] (issue: {issue}29393[#29393]) -* Fix more query extraction bugs. {pull}29388[#29388] (issues: {issue}28353[#28353], {issue}29376[#29376]) -* Fix some query extraction bugs. {pull}29283[#29283] - -Plugins:: -* Plugins: Fix native controller confirmation for non-meta plugin {pull}29434[#29434] - -Search:: -* Propagate ignore_unmapped to inner_hits {pull}29261[#29261] (issue: {issue}29071[#29071]) - -Security/Authentication:: -* Adds missing `idp.use_single_logout` and `populate_user_metadata` SAML realm -settings. See <>. - -Settings:: -* Archive unknown or invalid settings on updates {pull}28888[#28888] (issue: {issue}28609[#28609]) - -Watcher:: -* Re-enables `smtp.*` account configuration properties in the notification -settings. See <>. -* Ensures starting and stopping {watcher} is properly acknowledged as a master -node action. -* Refrains from appending a question mark to an HTTP request if no parameters -are used. - -//[float] -//=== Known Issues - -[[release-notes-6.2.3]] -== {es} version 6.2.3 - -//[float] -//[[breaking-6.2.3]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations - -Deprecated multi-argument versions of the request methods in the RestClient. -Prefer the "Request" object flavored methods. ({pull}30315[#30315]) - -//[float] -//=== New Features - -A new analysis plugin called `analysis_nori` that exposes the Lucene Korean -analysis module. ({pull}30397[#30397]) - -[float] -=== Enhancements - -Highlighting:: -* Limit analyzed text for highlighting (improvements) {pull}28808[#28808] (issues: {issue}16764[#16764], {issue}27934[#27934]) -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255]) -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow -copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404]) - -Added new "Request" object flavored request methods in the RestClient. Prefer -these instead of the multi-argument versions. ({pull}29623[#29623]) - -Recovery:: -* Require translogUUID when reading global checkpoint {pull}28587[#28587] (issue: {issue}28435[#28435]) -Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447]) -Watcher HTTP client used in watches now allows more parallel connections to the -same endpoint and evicts long running connections. 
({pull}30130[#30130]) - -The cluster state listener to decide if watcher should be -stopped/started/paused now runs far less code in an executor but is more -synchronous and predictable. Also the trigger engine thread is only started on -data nodes. And the Execute Watch API can be triggered regardless is watcher is -started or stopped. ({pull}30118[#30118]) - -Added put index template API to the high level rest client ({pull}30400[#30400]) - -Add ability to filter coordinating-only nodes when interacting with cluster -APIs. ({pull}30313[#30313]) - -[float] -=== Bug Fixes - -Core:: -* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797]) - -Engine:: -* Avoid class cast exception from index writer {pull}28989[#28989] -* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967]) -* Never block on key in `LiveVersionMap#pruneTombstones` {pull}28736[#28736] (issue: {issue}28714[#28714]) - -Ingest:: -* Continue registering pipelines after one pipeline parse failure. {pull}28752[#28752] (issue: {issue}28269[#28269]) - -Java High Level REST Client:: -* REST high-level client: encode path parts {pull}28663[#28663] (issue: {issue}28625[#28625]) - -Machine Learning:: -* Fixed the <> such that it -returns only machine learning-specific node attributes. - -Monitoring:: -* Aligned reporting of index statistics that exist in the current cluster state. -This fix avoids subtle race conditions in stats reporting. - -Packaging:: -* Delay path expansion on Windows {pull}28753[#28753] (issues: {issue}27675[#27675], {issue}28748[#28748]) - -Percolator:: -* Fix percolator query analysis for function_score query {pull}28854[#28854] -* Improved percolator's random candidate query duel test {pull}28840[#28840] - -Security:: -* Fixed handling of comments in XML documents [ESA-2018-07]. -* Fixed auditing such that when you use a local audit index, it maintains the -mappings automatically. Maintenance is necessary, for example, when new fields -are introduced or document types change. -* Added and changed settings for the SAML NameID policy. For example, added the -`nameid.allow_create` setting and changed the default value for -the SPNameQualifier setting to blank. See {stack-ov}/saml-realm.html[SAML Authentication]. -* Fixed handling of an Assertion Consumer Service (ACS) URL with existing query -parameters. See {stack-ov}/saml-realm.html[SAML Authentication]. -* Fixed the PKI realm bootstrap check such that it works with secure settings. -For more information, see <>. - -Snapshot/Restore:: -* Fix NPE when using deprecated Azure settings {pull}28769[#28769] (issues: {issue}23518[#23518], {issue}28299[#28299]) - -Stats:: -* Fix AdaptiveSelectionStats serialization bug {pull}28718[#28718] (issue: {issue}28713[#28713]) - -Watcher:: -* Fixed the serialization of failed hipchat messages, such that it no longer -tries to write the status field twice. -* Fixed TransformInput toXContent serialization errors. For more information, -see -{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data]. 
- - -Allocation:: - -Auto-expand replicas when adding or removing nodes to prevent shard copies from -being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - - -[[release-notes-6.2.2]] -== {es} version 6.2.2 - -//[float] -//[[breaking-6.2.2]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -[float] -=== Enhancements - -Recovery:: -* Synced-flush should not seal index of out of sync replicas {pull}28464[#28464] (issue: {issue}10032[#10032]) - -[float] -=== Bug Fixes - -Core:: -* Handle throws on tasks submitted to thread pools {pull}28667[#28667] -* Fix size blocking queue to not lie about its weight {pull}28557[#28557] (issue: {issue}28547[#28547]) - -Ingest:: -* Guard accessDeclaredMembers for Tika on JDK 10 {pull}28603[#28603] (issue: {issue}28602[#28602]) -* Fix for bug that prevents pipelines to load that use stored scripts after a restart {pull}28588[#28588] - -Java High Level REST Client:: -* Fix parsing of script fields {pull}28395[#28395] (issue: {issue}28380[#28380]) -* Move to POST when calling API to retrieve which support request body {pull}28342[#28342] (issue: {issue}28326[#28326]) - -Machine Learning:: -* Fixed an exception that occurred when a categorization field contained an -empty string. - -Monitoring:: -* Properly registered `xpack.monitoring.exporters.*.headers.*` settings, which -were broken in 6.2.0 and 6.2.1. For more information, see -<>. - -Packaging:: -* Fix using relative custom config path {pull}28700[#28700] (issue: {issue}27610[#27610]) -* Disable console logging in the Windows service {pull}28618[#28618] (issue: {issue}20422[#20422]) - -Percolator:: -* Do not take duplicate query extractions into account for minimum_should_match attribute {pull}28353[#28353] (issue: {issue}28315[#28315]) - -Recovery:: -* Fsync directory after cleanup {pull}28604[#28604] (issue: {issue}28435[#28435]) - -Security:: -* Added CachingRealm to published artifacts so it can be used in custom realm -extensions. -* If the realm uses native role mappings and the security index health changes, -the realm caches are cleared. For example, they are cleared when the index -recovers from a red state, when the index is deleted, when the index becomes -outdated, and when the index becomes up-to-date. -* Fixed a bug that could prevent auditing to a remote index if the remote -cluster was re-started at the same time as the audited cluster. -* Removed AuthorityKeyIdentifier's Issuer and Serial number from certificates -generated by `certgen` and `certutil`. This improves compatibility with -certificate verification in {kib}. - -Watcher:: -* Proxies now use HTTP by default, which was the default prior to 6.0. This -fixes issues with HTTPS requests that tried to access proxies via HTTP. -* Fixed the HTML sanitizer settings -(`xpack.notification.email.html.sanitization.*`), which were broken in 6.2. For -more information, see <>. - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.2.1]] -== {es} version 6.2.1 - -//[float] -//[[breaking-6.2.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements -The cluster state listener to decide if watcher should be -stopped/started/paused now runs far less code in an executor but is more -synchronous and predictable. 
Also the trigger engine thread is only started on -data nodes. And the Execute Watch API can be triggered regardless is watcher is -started or stopped. ({pull}30118[#30118]) - -[float] -=== Bug Fixes - -Plugin Lang Painless:: -* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501]) - -Plugins:: -* Fix the ability to remove old plugin {pull}28540[#28540] (issue: {issue}28538[#28538]) - -Security:: -* Fixed missing dependencies for x-pack-transport. -* Fixed `saml-metadata` env file such that it sources the appropriate -environment file. - -Machine Learning:: - -* Account for gaps in data counts after job is reopened ({pull}30294[#30294]) - -[[release-notes-6.2.0]] -== {es} version 6.2.0 - -[float] -[[breaking-6.2.0]] -=== Breaking Changes - -Aggregations:: -* Add a new cluster setting to limit the total number of buckets returned by a request {pull}27581[#27581] (issues: {issue}26012[#26012], {issue}27452[#27452]) - -Core:: -* Forbid granting the all permission in production {pull}27548[#27548] - -Highlighting:: -* Limit the analyzed text for highlighting {pull}27934[#27934] (issue: {issue}27517[#27517]) - -Rollover:: -* Fail rollover if duplicated alias found in templates {pull}28110[#28110] (issue: {issue}26976[#26976]) - -Search:: -* Introduce limit to the number of terms in Terms Query {pull}27968[#27968] (issue: {issue}18829[#18829]) - -[float] -=== Breaking Java Changes - -Java API:: -* Remove `operationThreaded` from Java API {pull}27836[#27836] - -Java High Level REST Client:: -* REST high-level client: remove index suffix from indices client method names {pull}28263[#28263] - -[float] -=== Deprecations - -Analysis:: -* Backport delimited payload filter renaming {pull}27535[#27535] (issue: {issue}26625[#26625]) - -Suggesters:: -* deprecating `jarowinkler` in favor of `jaro_winkler` {pull}27526[#27526] -* Deprecating `levenstein` in favor of `levensHtein` {pull}27409[#27409] (issue: {issue}27325[#27325]) - -[float] -=== New Features - -Machine Learning:: -* Added the ability to identify scheduled events and prevent anomaly detection -during these periods. For more information, see -{stack-ov}/ml-calendars.html[Calendars and Scheduled Events]. - -Plugin Ingest GeoIp:: -* Enable ASN support for Ingest GeoIP plugin. {pull}27958[#27958] (issue: {issue}27849[#27849]) - -Plugin Lang Painless:: -* Painless: Add spi jar that will be published for extending whitelists {pull}28302[#28302] -* Painless: Add a simple cache for whitelist methods and fields. {pull}28142[#28142] - -Plugins:: -* Add the ability to bundle multiple plugins into a meta plugin {pull}28022[#28022] (issue: {issue}27316[#27316]) - -Rank Evaluation:: -* Backport of ranking evaluation API (#27478) {pull}27844[#27844] (issue: {issue}27478[#27478]) - -Recovery:: -* Backport for using lastSyncedGlobalCheckpoint in deletion policy {pull}27866[#27866] (issue: {issue}27826[#27826]) - -Reindex API:: -* Add scroll parameter to _reindex API {pull}28041[#28041] (issue: {issue}27555[#27555]) - -Security:: -* {security} now supports user authentication using SAML Single Sign on. For -more information, see {stack-ov}/saml-realm.html[SAML authentication]. - -Watcher:: -* Added a transform input for chained input. For more information, see -{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data]. 
- -[float] -=== Enhancements - -Allocation:: -* Fix cluster.routing.allocation.enable and cluster.routing.rebalance.enable case {pull}28037[#28037] (issue: {issue}28007[#28007]) -* Add node id to shard failure message {pull}28024[#28024] (issue: {issue}28018[#28018]) - -Analysis:: -* Limit the analyzed text for highlighting (#27934) {pull}28176[#28176] (issue: {issue}27517[#27517]) -* Allow TrimFilter to be used in custom normalizers {pull}27758[#27758] (issue: {issue}27310[#27310]) - -Circuit Breakers:: -* Add accounting circuit breaker and track segment memory usage {pull}27116[#27116] (issue: {issue}27044[#27044]) - -Cluster:: -* Adds wait_for_no_initializing_shards to cluster health API {pull}27489[#27489] (issue: {issue}25623[#25623]) - -Core:: -* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) -* Rename core module to server {pull}28190[#28190] (issue: {issue}27933[#27933]) -* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) -* Introduce elasticsearch-core jar {pull}28178[#28178] (issue: {issue}27933[#27933]) -* Add Writeable.Reader support to TransportResponseHandler {pull}28010[#28010] (issue: {issue}26315[#26315]) -* Simplify rejected execution exception {pull}27664[#27664] (issue: {issue}27663[#27663]) -* Add node name to thread pool executor name {pull}27663[#27663] (issues: {issue}26007[#26007], {issue}26835[#26835]) - -Discovery:: -* Add information when master node left to DiscoveryNodes' shortSummary() {pull}28197[#28197] (issue: {issue}28169[#28169]) - -Engine:: -* Move uid lock into LiveVersionMap {pull}27905[#27905] -* Optimize version map for append-only indexing {pull}27752[#27752] - -Geo:: -* [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder {pull}27692[#27692] (issues: {issue}27690[#27690], {issue}9120[#9120]) -* [Geo] Add Well Known Text (WKT) Parsing Support to ShapeBuilders {pull}27417[#27417] (issue: {issue}9120[#9120]) - -Highlighting:: -* Include all sentences smaller than fragment_size in the unified highlighter {pull}28132[#28132] (issue: {issue}28089[#28089]) - -Ingest:: -* Enable convert processor to support Long and Double {pull}27891[#27891] (issues: {issue}23085[#23085], {issue}23423[#23423]) - -Internal:: -* Make KeyedLock reentrant {pull}27920[#27920] -* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865]) -* Tighten the CountedBitSet class {pull}27632[#27632] -* Avoid doing redundant work when checking for self references. {pull}26927[#26927] (issue: {issue}26907[#26907]) - -Java API:: -* Add missing delegate methods to NodeIndicesStats {pull}28092[#28092] -* Java api clean-up : consistency for `shards_acknowledged` getters {pull}27819[#27819] (issue: {issue}27784[#27784]) - -Java High Level REST Client:: -* add toString implementation for UpdateRequest. {pull}27997[#27997] (issue: {issue}27986[#27986]) -* Add Close Index API to the high level REST client {pull}27734[#27734] (issue: {issue}27205[#27205]) -* Add Open Index API to the high level REST client {pull}27574[#27574] (issue: {issue}27205[#27205]) -* Added Create Index support to high-level REST client {pull}27351[#27351] (issue: {issue}27205[#27205]) -* Add multi get api to the high level rest client {pull}27337[#27337] (issue: {issue}27205[#27205]) -* Add msearch api to high level client {pull}27274[#27274] - -Machine Learning:: -* Increased tokenization flexibility for categorization. 
Now all {es} analyzer -functionality is available, which opens up the possibility of sensibly -categorizing non-English log messages. For more information, see {stack-ov}/ml-configuring-categories.html#ml-configuring-analyzer[Customizing the Categorization Analyzer]. -* Improved the sensitivity of the analysis to high variance data with lots of -values near zero. -* Improved the decay rate of the model memory by using a weighted moving average. -* Machine learning indices created after upgrading to 6.2 have the -`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. -As a result, {ml} indices created after upgrading to 6.2 can have a green -status on single node clusters. There is no impact in multi-node clusters. -* Changed the credentials that are used by {dfeeds}. When {security} is enabled, -a {dfeed} stores the roles of the user who created or updated the {dfeed} -**at that time**. This means that if those roles are updated, the {dfeed} -subsequently runs with the new permissions that are associated with the roles. -However, if the user's roles are adjusted after creating or updating the {dfeed} -then the {dfeed} continues to run with the permissions that are associated with -the original roles. For more information, see -{stack-ov}/ml-dfeeds.html[Datafeeds]. -* Added a new `scheduled` forecast status, which indicates that the forecast -has not started yet. - -Mapping:: -* Allow `_doc` as a type. {pull}27816[#27816] (issues: {issue}27750[#27750], {issue}27751[#27751]) - -Monitoring:: -* {monitoring} indices (`.monitoring`) created after upgrading to 6.2 have the -`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. -As a result, monitoring indices created after upgrading to 6.2 can have a green -status on single node clusters. There is no impact in multi-node clusters. -* Added a cluster alert that triggers whenever a node is added, removed, or -restarted. - -Network:: -* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) -* Add read timeouts to http module {pull}27713[#27713] -* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) -* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) -* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) - -Packaging:: -* Extend JVM options to support multiple versions {pull}27675[#27675] (issue: {issue}27646[#27646]) -* Add explicit coreutils dependency {pull}27660[#27660] (issue: {issue}27609[#27609]) -* Detect mktemp from coreutils {pull}27659[#27659] (issues: {issue}27609[#27609], {issue}27643[#27643]) -* Enable GC logs by default {pull}27610[#27610] -* Use private directory for temporary files {pull}27609[#27609] (issues: {issue}14372[#14372], {issue}27144[#27144]) - -Percolator:: -* also extract match_all queries when indexing percolator queries {pull}27585[#27585] - -Plugin Lang Painless:: -* Painless: Add whitelist extensions {pull}28161[#28161] -* Painless: Modify Loader to Load Classes Directly from Definition {pull}28088[#28088] -* Clean Up Painless Cast Object {pull}27794[#27794] -* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264]
-
-Plugins::
-* Add client actions to action plugin {pull}28280[#28280] (issue: {issue}27759[#27759])
-* Plugins: Add validation to plugin descriptor parsing {pull}27951[#27951]
-* Plugins: Add plugin extension capabilities {pull}27881[#27881]
-* Add support for filtering mappings fields {pull}27603[#27603]
-
-Rank Evaluation::
-* Simplify RankEvalResponse output {pull}28266[#28266]
-
-Recovery::
-* Truncate tlog cli should assign global checkpoint {pull}28192[#28192] (issue: {issue}28181[#28181])
-* Replica starts peer recovery with safe commit {pull}28181[#28181] (issue: {issue}10708[#10708])
-* Primary send safe commit in file-based recovery {pull}28038[#28038] (issue: {issue}10708[#10708])
-* Fail resync-failed shards in subsequent writes {pull}28005[#28005]
-* Introduce promoting index shard state {pull}28004[#28004] (issue: {issue}24841[#24841])
-* Non-peer recovery should set the global checkpoint {pull}27965[#27965]
-* Persist global checkpoint when finalizing a peer recovery {pull}27947[#27947] (issue: {issue}27861[#27861])
-* Rollback a primary before recovering from translog {pull}27804[#27804] (issue: {issue}10708[#10708])
-
-Search::
-* Use typeName() to check field type in GeoShapeQueryBuilder {pull}27730[#27730]
-* Optimize search_after when sorting in index sort order {pull}26401[#26401]
-
-Security::
-* Added the ability to refresh tokens that were created by the token API. The
-token API now returns a refresh token alongside the access token; you can use
-the refresh token within 24 hours of its creation to obtain a new access token
-without re-authenticating (example below). For more information, see <>.
-* Added principal and role information to `access_granted`, `access_denied`,
-`run_as_granted`, and `run_as_denied` audit events. For more information about
-these events, see {stack-ov}/auditing.html[Auditing Security Events].
-* Added audit event ignore policies, which are a way to tune the verbosity of an
-audit trail. These policies define rules for ignoring audit events that match
-specific attribute values. For more information, see
-{stack-ov}/audit-log-output.html#audit-log-ignore-policy[Logfile Audit Events Ignore Policies].
-* Added a certificates API, which enables you to retrieve information about the
-X.509 certificates that are used to encrypt communications in your {es} cluster.
-For more information, see <>.
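A minimal sketch of the token refresh flow flagged above ("example below"). It
assumes an access token was created earlier and that its response included a
refresh token; the token value here is a placeholder, not real data:

[source,js]
--------------------------------------------------
POST /_xpack/security/oauth2/token
{
  "grant_type": "refresh_token",
  "refresh_token": "vLBPvmAB6KvwvJZr27cS"
}
--------------------------------------------------

On success the response carries a fresh access token; the refresh token itself
is usable only once, and only within 24 hours of its creation.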
-
-Sequence IDs::
-* Do not keep 5.x commits when having 6.x commits {pull}28188[#28188] (issues: {issue}27606[#27606], {issue}28038[#28038])
-* Use lastSyncedGlobalCheckpoint in deletion policy {pull}27826[#27826] (issue: {issue}27606[#27606])
-* Use CountedBitSet in LocalCheckpointTracker {pull}27793[#27793]
-* Only fsync global checkpoint if needed {pull}27652[#27652]
-* Keep commits and translog up to the global checkpoint {pull}27606[#27606]
-* Adjust CombinedDeletionPolicy for multiple commits {pull}27456[#27456] (issues: {issue}10708[#10708], {issue}27367[#27367])
-* Keeps index commits up to the current global checkpoint {pull}27367[#27367] (issue: {issue}10708[#10708])
-* Dedup translog operations by reading in reverse {pull}27268[#27268] (issue: {issue}10708[#10708])
-
-Settings::
-* Add validation of keystore setting names {pull}27626[#27626]
-
-Snapshot/Restore::
-* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723]
-* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480]
-* Include include_global_state in Snapshot status API (#22423) {pull}26853[#26853] (issue: {issue}22423[#22423])
-
-Task Manager::
-* Add ability to associate an ID with tasks {pull}27764[#27764] (issue: {issue}23250[#23250])
-
-Translog::
-* Simplify MultiSnapshot#SeqNoSet {pull}27547[#27547] (issue: {issue}27268[#27268])
-* Enclose CombinedDeletionPolicy in SnapshotDeletionPolicy {pull}27528[#27528] (issues: {issue}27367[#27367], {issue}27456[#27456])
-
-Watcher::
-* Added the ability to set the `index` and `doc_type` dynamically in an index
-action. For more information, see {stack-ov}/actions-index.html[Index Action].
-* Added a `refresh` index action attribute, which enables you to set the
-refresh policy of the write request. For more information, see
-{stack-ov}/actions-index.html[Index Action].
-* Added support for actions in Slack attachments, which enables you to add
-buttons that can be clicked in Slack messages. For more information, see
-{stack-ov}/actions-slack.html[Slack Action].
-* {watcher} indices (`.watch*` and `triggered_watches`) created after upgrading
-to 6.2 have the `auto_expand_replicas: 0-1` setting rather than a fixed setting
-of 1 replica. As a result, {watcher} indices created after upgrading to 6.2 can
-have a green status on single node clusters. There is no impact in multi-node
-clusters.
-
-[float]
-=== Bug Fixes
-
-Aggregations::
-* Adds metadata to rewritten aggregations {pull}28185[#28185] (issue: {issue}28170[#28170])
-* Fix NPE on composite aggregation with sub-aggregations that need scores {pull}28129[#28129]
-* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012])
-* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
-* Fix global aggregation that requires breadth first and scores {pull}27942[#27942] (issues: {issue}22321[#22321], {issue}27928[#27928])
-* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
-* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])
-* Using DocValueFormat::parseBytesRef for parsing missing value parameter {pull}27855[#27855] (issue: {issue}27788[#27788])
-* Fix illegal cast of the "low cardinality" optimization of the `terms` aggregation. {pull}27543[#27543]
-* Always include the _index and _id for nested search hits.
{pull}27201[#27201] (issue: {issue}27053[#27053]) - -Allocation:: -* Do not open indices with broken settings {pull}26995[#26995] - -Core:: -* Fix lock accounting in releasable lock {pull}28202[#28202] -* Fixes ByteSizeValue to serialise correctly {pull}27702[#27702] (issue: {issue}27568[#27568]) -* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) -* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] -* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) - -Engine:: -* Replica recovery could go into an endless flushing loop {pull}28350[#28350] -* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852]) -* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852]) -* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) - -Geo:: -* Correct two equality checks on incomparable types {pull}27688[#27688] -* Handle case where the hole vertex is south of the containing polygon(s) {pull}27685[#27685] (issue: {issue}25933[#25933]) - -Highlighting:: -* Fix highlighting on a keyword field that defines a normalizer {pull}27604[#27604] - -Inner Hits:: -* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) - -Internal:: -* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) -* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) -* Retain originalIndex info when rewriting FieldCapabilities requests {pull}27761[#27761] - -Java REST Client:: -* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) - -Machine Learning:: -* Improved error reporting for crashes and resource problems on Linux. -* Improved the detection of seasonal trends in bucket spans longer than 1 hour. -* Updated the forecast API to wait for validation and return an error if the -validation fails. -* Set the actual bucket value to 0 in model plots for empty buckets for count -and sum functions. The count and sum functions treat empty buckets as 0 rather -than unknown for anomaly detection, so it was inconsistent not to do the same -for model plots. This inconsistency resulted in problems plotting these buckets -in {kib}. - -Mapping:: -* Ignore null value for range field (#27845) {pull}28116[#28116] (issue: {issue}27845[#27845]) -* Pass `java.locale.providers=COMPAT` to Java 9 onwards {pull}28080[#28080] (issue: {issue}10984[#10984]) -* Allow update of `eager_global_ordinals` on `_parent`. 
{pull}28014[#28014] (issue: {issue}24407[#24407]) -* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) - -Network:: -* Only bind loopback addresses when binding to local {pull}28029[#28029] (issue: {issue}1877[#1877]) -* Remove potential nio selector leak {pull}27825[#27825] -* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) -* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) -* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) - -Packaging:: -* Allow custom service names when installing on windows {pull}25255[#25255] (issue: {issue}25231[#25231]) - -Percolator:: -* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1]) - -Plugin Analysis ICU:: -* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] - -Plugin Analysis Phonetic:: -* Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter {pull}28225[#28225] (issue: {issue}28211[#28211]) - -Plugin Lang Painless:: -* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) -* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210]) - -Plugin Repository HDFS:: -* Fix SecurityException when HDFS Repository used against HA Namenodes {pull}27196[#27196] - -Plugins:: -* Make sure that we don't detect files as maven coordinate when installing a plugin {pull}28163[#28163] -* Fix upgrading indices which use a custom similarity plugin. {pull}26985[#26985] (issue: {issue}25350[#25350]) - -Recovery:: -* Open engine should keep only starting commit {pull}28228[#28228] (issues: {issue}27804[#27804], {issue}28181[#28181]) -* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061]) -* Set global checkpoint before open engine from store {pull}27972[#27972] (issues: {issue}27965[#27965], {issue}27970[#27970]) -* Check and repair index under the store metadata lock {pull}27768[#27768] (issues: {issue}24481[#24481], {issue}24787[#24787], {issue}27731[#27731]) -* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) - -Rollover:: -* Make index rollover action atomic {pull}28039[#28039] (issue: {issue}26976[#26976]) - -Scripting:: -* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] - -Scroll:: -* Reject scroll query if size is 0 (#22552) {pull}27842[#27842] (issue: {issue}22552[#22552]) -* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] - -Search:: -* Fix simple_query_string on invalid input {pull}28219[#28219] (issue: {issue}28204[#28204]) -* Use the underlying connection version for CCS connections {pull}28093[#28093] -* Fix synonym phrase query expansion for cross_fields parsing {pull}28045[#28045] -* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979]) -* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) - -Security:: -* Updated the `setup-passwords` command to generate passwords with characters -`A-Z`, `a-z`, and `0-9`, so that they are safe to use in shell scripts. For more -information about this command, see <>. -* Improved the error messages that occur if the `x-pack` directory is missing -when you run <>. 
-* Fixed the ordering of realms in a realm chain, which determines the order in
-which the realms are consulted. For more information, see
-{stack-ov}/realms.html[Realms].
-
-Sequence IDs::
-* Recovery from snapshot may leave seq# gaps {pull}27850[#27850]
-* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591])
-* Obey translog durability in global checkpoint sync {pull}27641[#27641]
-
-Settings::
-* Settings: Introduce settings updater for a list of settings {pull}28338[#28338] (issue: {issue}28047[#28047])
-* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316])
-* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926])
-* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) (example below)
-
-Snapshot/Restore::
-* Consistent updates of IndexShardSnapshotStatus {pull}28130[#28130] (issue: {issue}26480[#26480])
-* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974])
-* Do not start snapshots that are deleted during initialization {pull}27931[#27931]
-* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597]
-* Consistent update of stage and failure message in IndexShardSnapshotStatus {pull}27557[#27557] (issue: {issue}26480[#26480])
-* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])
-* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865])
-
-Stats::
-* Fixes DocStats to properly deal with shards that report -1 index size {pull}27863[#27863]
-* Include internal refreshes in refresh stats {pull}27615[#27615]
-
-Term Vectors::
-* Fix term vectors generator with keyword and normalizer {pull}27608[#27608] (issue: {issue}27320[#27320])
-
-Watcher::
-* Replaced group settings with affix key settings where filters are needed.
-For more information, see https://github.com/elastic/elasticsearch/pull/28338.
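A sketch of the wildcard reset fix flagged above ("example below"), using the
update index settings API; the index and setting names are illustrative, and
setting a key to `null` restores its default:

[source,js]
--------------------------------------------------
PUT /my-index/_settings
{
  "index.routing.allocation.include.*": null
}
--------------------------------------------------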
- -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[float] -=== Upgrades - -Core:: -* Dependencies: Update joda time to 2.9.9 {pull}28261[#28261] -* upgrade to lucene 7.2.1 {pull}28218[#28218] (issue: {issue}28044[#28044]) -* Upgrade jna from 4.4.0-1 to 4.5.1 {pull}28183[#28183] (issue: {issue}28172[#28172]) - -Ingest:: -* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] - -[[release-notes-6.1.4]] -== {es} version 6.1.4 - -//[float] -//[[breaking-6.1.4]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -[float] -=== Enhancements - -Core:: -* Fix classes that can exit {pull}27518[#27518] - -[float] -=== Bug Fixes - -Aggregations:: -* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) - -Core:: -* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797]) - -Engine:: -* Avoid class cast exception from index writer {pull}28989[#28989] -* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967]) - -Scripting:: -* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.1.3]] -== {es} version 6.1.3 - -//[float] -//[[breaking-6.1.3]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Engine:: -* Replica recovery could go into an endless flushing loop {pull}28350[#28350] - -Internal:: -* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) -* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) - -Mapping:: -* Allow update of `eager_global_ordinals` on `_parent`. {pull}28014[#28014] (issue: {issue}24407[#24407]) - -Scripting:: -* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] - -Security:: -* Improved cache expiry handling in the token service. Previously, if the token -service was idle for more than 60 minutes, the key expired and the service -failed to generate user tokens. - -Settings:: -* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) -* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) - -Snapshot/Restore:: -* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) -* Do not start snapshots that are deleted during initialization {pull}27931[#27931] - -Watcher:: -* Fixed a null pointer exception in the TemplateRegistry when there is no master -node available. -* Ensured collections obtained from scripts are protected from self-referencing. -See https://github.com/elastic/elasticsearch/pull/28335. 
-
-//[float]
-//=== Regressions
-
-//[float]
-//=== Known Issues
-
-[[release-notes-6.1.2]]
-== {es} version 6.1.2
-
-//[float]
-//[[breaking-6.1.2]]
-//=== Breaking Changes
-
-//[float]
-//=== Breaking Java Changes
-
-//[float]
-//=== Deprecations
-
-//[float]
-//=== New Features
-
-[float]
-=== Enhancements
-
-Internal::
-* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865])
-
-Java Low Level REST Client::
-* Added new "Request" object flavored request methods. Prefer these instead of
-the multi-argument versions. ({pull}29623[#29623])
-
-[float]
-=== Bug Fixes
-
-Aggregations::
-* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
-* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
-* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])
-
-Engine::
-* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852])
-* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852])
-
-Machine Learning::
-* Fixed the removal of tokens during categorization, where the tokens were
-incorrectly deemed to be hexadecimal numbers. For more information, see
-{stack-ov}/ml-configuring-categories.html[Categorizing log messages].
-* Reduced the sensitivity of the analysis to small perturbations in the input
-data.
-* Disabled the ability to create forecasts for jobs that were created before
-6.1.0.
-
-Monitoring::
-* Added a `cluster_alerts.management.blacklist` setting for HTTP Exporters,
-which you can use to block the creation of specific cluster alerts. For more
-information, see <>.
-
-Network::
-* Only bind loopback addresses when binding to local {pull}28029[#28029]
-
-Recovery::
-* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061])
-
-Search::
-* Use the underlying connection version for CCS connections {pull}28093[#28093]
-* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979])
-
-Security::
-* Fixed an issue in the Active Directory realm when following referrals that
-resulted in an increase in the number of connections made to Active Directory.
-* Fixed an exception that occurred when using auditing and transport clients. In
-particular, the problem occurred when the number of processors on the transport
-client did not match the number of processors on the server.
-* Ensured that TLS is not required to install a license if you are using
-single-node discovery. For more information, see <> and
-{stack-ov}/ssl-tls.html[Setting up TLS on a Cluster].
-* Fixed the <>. In particular, the
-`has_all_requested` field in the API results was not taking cluster privileges
-into consideration.
-
-Snapshot/Restore::
-* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])
-
-Translog::
-* Only sync translog when global checkpoint increased {pull}27973[#27973] (issues: {issue}27837[#27837], {issue}27970[#27970])
-
-Watcher::
-* Fixed encoding of UTF-8 data in the HTTP client.
-
-//[float]
-//=== Regressions
-
-//[float]
-//=== Known Issues
-
-[[release-notes-6.1.1]]
-== {es} version 6.1.1
-
-//[float]
-//[[breaking-6.1.1]]
-//=== Breaking Changes
-
-//[float]
-//=== Breaking Java Changes
-
-//[float]
-//=== Deprecations
-
-//[float]
-//=== New Features
-
-[float]
-=== Enhancements
-
-Snapshot/Restore::
-* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723]
-
-Watcher::
-* Ensured the watcher thread pool size is reasonably bounded. In particular, the
-watcher thread pool size is now five times the number of processors, capped at
-50 threads. If more than 50 processors exist, the watcher thread pool size
-grows to match the number of processors. For example, a node with 16 processors
-gets 50 threads (5 × 16 = 80, capped at 50), while a node with 64 processors
-gets 64 threads.
-
-[float]
-=== Bug Fixes
-
-Inner Hits::
-* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822])
-
-Java REST Client::
-* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827])
-
-Monitoring::
-* Data collectors now all share the same cluster state that existed at the
-beginning of data collection. This removes the extremely rare race condition
-where the cluster state can change between some data collectors, which could
-cause temporary issues in the Monitoring UI.
-
-Search::
-* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708])
-
-Sequence IDs::
-* Recovery from snapshot may leave seq# gaps {pull}27850[#27850]
-* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591])
-
-Watcher::
-* Fixed the pagerduty action to send context data. For more information, see
-{stack-ov}/actions-pagerduty.html[PagerDuty Action].
-
-//[float]
-//=== Regressions
-
-//[float]
-//=== Known Issues
-
-[float]
-=== Upgrades
-
-Ingest::
-* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824]
-
-[[release-notes-6.1.0]]
-== {es} version 6.1.0
-
-[float]
-[[breaking-6.1.0]]
-=== Breaking Changes
-
-Network::
-* Allow only a fixed-size receive predictor {pull}26165[#26165] (issue: {issue}23185[#23185])
-
-REST::
-* Standardize underscore requirements in parameters {pull}27414[#27414] (issues: {issue}26886[#26886], {issue}27040[#27040])
-
-Scroll::
-* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342]
-
-Search::
-* Add a limit to from + size in top_hits and inner hits. {pull}26492[#26492] (issue: {issue}11511[#11511])
-
-Security::
-* The `certgen` command now returns validation errors when it encounters problems
-reading from an input file (with the `-in` command option). Previously these
-errors might have been ignored or caused the command to abort with unclear
-messages. For more information, see <>.
-
-[float]
-=== Breaking Java Changes
-
-Aggregations::
-* Moves deferring code into its own subclass {pull}26421[#26421]
-
-Core::
-* Unify Settings xcontent reading and writing {pull}26739[#26739]
-
-Settings::
-* Return List instead of an array from settings {pull}26903[#26903]
-* Remove `Settings#put(Map)` {pull}26785[#26785]
-
-[float]
-=== Deprecations
-
-Aggregations::
-* Deprecate global_ordinals_hash and global_ordinals_low_cardinality {pull}26173[#26173] (issue: {issue}26014[#26014])
-
-Allocation::
-* Add deprecation warning for negative index.unassigned.node_left.delayed_timeout {pull}26832[#26832] (issue: {issue}26828[#26828])
-
-Analysis::
-* Add limits for ngram and shingle settings {pull}27411[#27411] (issues: {issue}25887[#25887], {issue}27211[#27211])
-
-Geo::
-* [GEO] 6x Deprecate ShapeBuilders and decouple geojson parse logic {pull}27345[#27345]
-
-Mapping::
-* Deprecate the `index_options` parameter for numeric fields {pull}26672[#26672] (issue: {issue}21475[#21475])
-
-Plugin Repository Azure::
-* Azure repository: Move to named configurations as we do for S3 repository and secure settings {pull}23405[#23405] (issues: {issue}22762[#22762], {issue}22763[#22763])
-
-Search::
-* doc: deprecate _primary and _replica shard option {pull}26792[#26792] (issue: {issue}26335[#26335])
-
-[float]
-=== New Features
-
-Aggregations::
-* Aggregations: bucket_sort pipeline aggregation {pull}27152[#27152] (issue: {issue}14928[#14928])
-* Add composite aggregator {pull}26800[#26800]
-
-Analysis::
-* Added Bengali Analyzer to Elasticsearch with respect to the lucene update {pull}26527[#26527]
-
-Ingest::
-* add URL-Decode Processor to Ingest {pull}26045[#26045] (issue: {issue}25837[#25837])
-
-Java High Level REST Client::
-* Added Delete Index support to high-level REST client {pull}27019[#27019] (issue: {issue}25847[#25847])
-
-Machine Learning::
-* Added the ability to create job forecasts. This feature enables you to use
-historical behavior to predict the future behavior of your time series. You can
-create forecasts in {kib} or by using the <> API.
-+
---
-NOTE: You cannot create forecasts for jobs that were created in previous
-versions; this functionality is available only for jobs created in 6.1 or later.
-
---
-* Added overall buckets, which summarize bucket results for multiple jobs.
-For more information, see the <> API.
-* Added job groups, which you can use to manage or retrieve information from
-multiple jobs at once. Also updated many {ml} APIs to support groups and
-wildcard expressions in the job identifier.
-
-Nested Docs::
-* Multi-level Nested Sort with Filters {pull}26395[#26395]
-
-Query DSL::
-* Add terms_set query {pull}27145[#27145] (issue: {issue}26915[#26915])
-* Introduce sorted_after query for sorted index {pull}26377[#26377]
-* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}26097[#26097]
-
-Search::
-* Expose `fuzzy_transpositions` parameter in fuzzy queries {pull}26870[#26870] (issue: {issue}18348[#18348])
-* Add upper limit for scroll expiry {pull}26448[#26448] (issues: {issue}11511[#11511], {issue}23268[#23268])
-* Implement adaptive replica selection {pull}26128[#26128] (issue: {issue}24915[#24915])
-* configure distance limit {pull}25731[#25731] (issue: {issue}25528[#25528])
-
-Similarities::
-* Add a scripted similarity.
{pull}25831[#25831]
-
-Suggesters::
-* Expose duplicate removal in the completion suggester {pull}26496[#26496] (issue: {issue}23364[#23364])
-* Support must and should for context query in context suggester {pull}26407[#26407] (issues: {issue}24421[#24421], {issue}24565[#24565])
-
-[float]
-=== Enhancements
-
-Aggregations::
-* Allow aggregation sorting via nested aggregation {pull}26683[#26683] (issue: {issue}16838[#16838])
-
-Allocation::
-* Tie-break shard path decision based on total number of shards on path {pull}27039[#27039] (issue: {issue}26654[#26654])
-* Balance shards for an index more evenly across multiple data paths {pull}26654[#26654] (issue: {issue}16763[#16763])
-* Expand "NO" decision message in NodeVersionAllocationDecider {pull}26542[#26542] (issue: {issue}10403[#10403])
-* _reroute's retry_failed flag should reset failure counter {pull}25888[#25888] (issue: {issue}25291[#25291])
-
-Analysis::
-* Add configurable `max_token_length` parameter to whitespace tokenizer {pull}26749[#26749] (issue: {issue}26643[#26643])
-
-CRUD::
-* Add wait_for_active_shards parameter to index open command {pull}26682[#26682] (issue: {issue}20937[#20937])
-
-Core::
-* Fix classes that can exit {pull}27518[#27518]
-* Replace empty index block checks with global block checks in template delete/put actions {pull}27050[#27050] (issue: {issue}10530[#10530])
-* Allow Uid#decodeId to decode from a byte array slice {pull}26987[#26987] (issue: {issue}26931[#26931])
-* Use separate searchers for "search visibility" vs "move indexing buffer to disk" {pull}26972[#26972] (issues: {issue}15768[#15768], {issue}26802[#26802], {issue}26912[#26912], {issue}3593[#3593])
-* Add ability to split shards {pull}26931[#26931]
-* Make circuit breaker mutations debuggable {pull}26067[#26067] (issue: {issue}25891[#25891])
-
-Dates::
-* DateProcessor Locale {pull}26186[#26186] (issue: {issue}25513[#25513])
-
-Discovery::
-* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328])
-
-Engine::
-* Ensure external refreshes will also refresh internal searcher to minimize segment creation {pull}27253[#27253] (issue: {issue}26972[#26972])
-* Move IndexShard#getWritingBytes() under InternalEngine {pull}27209[#27209] (issue: {issue}26972[#26972])
-* Refactor internal engine {pull}27082[#27082]
-
-Geo::
-* Add ignore_malformed to geo_shape fields {pull}24654[#24654] (issue: {issue}23747[#23747])
-
-Ingest::
-* add json-processor support for non-map json types {pull}27335[#27335] (issue: {issue}25972[#25972])
-* Introduce templating support to timezone/locale in DateProcessor {pull}27089[#27089] (issue: {issue}24024[#24024])
-* Add support for parsing inline script (#23824) {pull}26846[#26846] (issue: {issue}23824[#23824])
-* Consolidate locale parsing.
{pull}26400[#26400] -* Accept ingest simulate params as ints or strings {pull}23885[#23885] (issue: {issue}23823[#23823]) - -Internal:: -* Avoid uid creation in ParsedDocument {pull}27241[#27241] -* Upgrade to Lucene 7.1.0 snapshot version {pull}26864[#26864] (issue: {issue}26527[#26527]) -* Remove `_index` fielddata hack if cluster alias is present {pull}26082[#26082] (issue: {issue}25885[#25885]) - -Java High Level REST Client:: -* Adjust RestHighLevelClient method modifiers {pull}27238[#27238] -* Decouple BulkProcessor from ThreadPool {pull}26727[#26727] (issue: {issue}26028[#26028]) - -Logging:: -* Add more information on _failed_to_convert_ exception (#21946) {pull}27034[#27034] (issue: {issue}21946[#21946]) -* Improve shard-failed log messages. {pull}26866[#26866] - -Machine Learning:: -* Improved the way {ml} jobs are allocated to nodes, such that it is primarily -determined by the estimated memory requirement of the job. If there is insufficient -information about the job's memory requirements, the allocation decision is based -on job counts per node. -* Increased the default value of the `xpack.ml.max_open_jobs` setting from `10` -to `20`. The allocation of jobs to nodes now considers memory usage as well as -job counts, so it's reasonable to permit more small jobs on a single node. For -more information, see <>. -* Decreased the default `model_memory_limit` property value to 1 GB for new jobs. -If you want to create a job that analyzes high cardinality fields, you can -increase this property value. For more information, see <>. -* Improved analytics related to decay rates when predictions are very accurate. -* Improved analytics related to detecting non-negative quantities and using this -information to constrain analysis, predictions, and confidence intervals. -* Improved periodic trough or spike detection. -* Improved the speed of the aggregation of {ml} results. -* Improved probability calculation performance. -* Expedited bucket processing time in very large populations by determining when -there are nearly duplicate values in a bucket and de-duplicating the samples that -are added to the model. -* Improved handling of periodically missing values. -* Improved analytics related to diurnal periodicity. -* Reduced memory usage during population analysis by releasing redundant memory -after the bucket results are written. -* Improved modeling of long periodic components, particularly when there is a -long bucket span. - -Mapping:: -* Allow ip_range to accept CIDR notation {pull}27192[#27192] (issue: {issue}26260[#26260]) -* Deduplicate `_field_names`. {pull}26550[#26550] -* Throw a better error message for empty field names {pull}26543[#26543] (issue: {issue}23348[#23348]) -* Stricter validation for min/max values for whole numbers {pull}26137[#26137] -* Make FieldMapper.copyTo() always non-null. {pull}25994[#25994] - -Monitoring:: -* Added the new `interval_ms` field to monitoring documents. This field -indicates the current collection interval for {es} or external monitored systems. 
- -Nested Docs:: -* Use the primary_term field to identify parent documents {pull}27469[#27469] (issue: {issue}24362[#24362]) -* Prohibit using `nested_filter`, `nested_path` and new `nested` Option at the same time in FieldSortBuilder {pull}26490[#26490] (issue: {issue}17286[#17286]) - -Network:: -* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) -* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) -* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) - -Percolator:: -* Use Lucene's CoveringQuery to select percolate candidate matches {pull}27271[#27271] (issues: {issue}26081[#26081], {issue}26307[#26307]) -* Add support to percolate query to percolate multiple documents simultaneously {pull}26418[#26418] -* Hint what clauses are important in a conjunction query based on fields {pull}26081[#26081] -* Add support for selecting percolator query candidate matches containing range queries {pull}25647[#25647] (issue: {issue}21040[#21040]) - -Plugin Discovery EC2:: -* update AWS SDK for ECS Task IAM support in discovery-ec2 {pull}26479[#26479] (issue: {issue}23039[#23039]) - -Plugin Lang Painless:: -* Painless: Only allow Painless type names to be the same as the equivalent Java class. {pull}27264[#27264] -* Allow for the Painless Definition to have multiple instances for white-listing {pull}27096[#27096] -* Separate Painless Whitelist Loading from the Painless Definition {pull}26540[#26540] -* Remove Sort enum from Painless Definition {pull}26179[#26179] - -Plugin Repository Azure:: -* Add azure storage endpoint suffix #26432 {pull}26568[#26568] (issue: {issue}26432[#26432]) -* Support for accessing Azure repositories through a proxy {pull}23518[#23518] (issues: {issue}23506[#23506], {issue}23517[#23517]) - -Plugin Repository S3:: -* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) -* Update to AWS SDK 1.11.223 {pull}27278[#27278] - -Plugins:: -* Plugins: Add versionless alias to all security policy codebase properties {pull}26756[#26756] (issue: {issue}26521[#26521]) -* Allow plugins to plug rescore implementations {pull}26368[#26368] (issue: {issue}26208[#26208]) - -Query DSL:: -* Add support for wildcard on `_index` {pull}27334[#27334] (issue: {issue}25722[#25722]) - -Reindex API:: -* Update by Query is modified to accept short `script` parameter. 
{pull}26841[#26841] (issue: {issue}24898[#24898])
-* reindex: automatically choose the number of slices {pull}26030[#26030] (issues: {issue}24547[#24547], {issue}25582[#25582])
-
-Rollover::
-* Add size-based condition to the index rollover API {pull}27160[#27160] (issue: {issue}27004[#27004]) (example below)
-* Add size-based condition to the index rollover API {pull}27115[#27115] (issue: {issue}27004[#27004])
-
-Scripting::
-* Script: Convert script query to a dedicated script context {pull}26003[#26003]
-
-Search::
-* Make fields optional in multi_match query and rely on index.query.default_field by default {pull}27380[#27380]
-* fix unnecessary logger creation {pull}27349[#27349]
-* `ObjectParser` : replace `IllegalStateException` with `ParsingException` {pull}27302[#27302] (issue: {issue}27147[#27147])
-* Uses norms for exists query if enabled {pull}27237[#27237]
-* Cross Cluster Search: make remote clusters optional {pull}27182[#27182] (issues: {issue}26118[#26118], {issue}27161[#27161])
-* Enhances exists queries to reduce need for `_field_names` {pull}26930[#26930] (issue: {issue}26770[#26770])
-* Change ParentFieldSubFetchPhase to create doc values iterator once per segment {pull}26815[#26815]
-* Change VersionFetchSubPhase to create doc values iterator once per segment {pull}26809[#26809]
-* Change ScriptFieldsFetchSubPhase to create search scripts once per segment {pull}26808[#26808] (issue: {issue}26775[#26775])
-* Make sure SortBuilders rewrite inner nested sorts {pull}26532[#26532]
-* Extend testing of build method in ScriptSortBuilder {pull}26520[#26520] (issues: {issue}17286[#17286], {issue}26490[#26490])
-* Accept an array of field names and boosts in the index.query.default_field setting {pull}26320[#26320] (issue: {issue}25946[#25946])
-* Reject IPv6-mapped IPv4 addresses when using the CIDR notation. {pull}26254[#26254] (issue: {issue}26078[#26078])
-* Rewrite range queries with open bounds to exists query {pull}26160[#26160] (issue: {issue}22640[#22640])
-
-Security::
-* Added the `manage_index_templates` cluster privilege to the built-in role
-`kibana_system`. For more information, see
-{stack-ov}/security-privileges.html#privileges-list-cluster[Cluster Privileges]
-and {stack-ov}/built-in-roles.html[Built-in Roles].
-* Newly created or updated watches execute with the privileges of the user that
-last modified the watch.
-* Added log messages that identify when a PEM key is found where a PEM
-certificate was expected (or vice versa) in the `xpack.ssl.key` or
-`xpack.ssl.certificate` settings.
-* Added the new `certutil` command to simplify the creation of certificates for
-use with the Elastic stack. For more information, see <>.
-* Added automatic detection of support for AES 256 bit TLS ciphers and enabled
-their use when the JVM supports them.
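A sketch of the size-based rollover condition flagged above ("example below");
the alias name and thresholds are illustrative, and `max_size` can be combined
with the pre-existing `max_age` and `max_docs` conditions:

[source,js]
--------------------------------------------------
POST /logs-alias/_rollover
{
  "conditions": {
    "max_size": "5gb",
    "max_age": "7d"
  }
}
--------------------------------------------------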
- -Sequence IDs:: -* Only fsync global checkpoint if needed {pull}27652[#27652] -* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) -* Lazy initialize checkpoint tracker bit sets {pull}27179[#27179] (issue: {issue}10708[#10708]) -* Returns the current primary_term for Get/MultiGet requests {pull}27177[#27177] (issue: {issue}26493[#26493]) - -Settings:: -* Allow affix settings to specify dependencies {pull}27161[#27161] -* Represent lists as actual lists inside Settings {pull}26878[#26878] (issue: {issue}26723[#26723]) -* Remove Settings#getAsMap() {pull}26845[#26845] -* Replace group map settings with affix setting {pull}26819[#26819] -* Throw exception if setting isn't recognized {pull}26569[#26569] (issue: {issue}25607[#25607]) -* Settings: Move keystore creation to plugin installation {pull}26329[#26329] (issue: {issue}26309[#26309]) - -Snapshot/Restore:: -* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] -* Snapshot: Migrate TransportRequestHandler to TransportMasterNodeAction {pull}27165[#27165] (issue: {issue}27151[#27151]) -* Fix toString of class SnapshotStatus (#26851) {pull}26852[#26852] (issue: {issue}26851[#26851]) - -Stats:: -* Adds average document size to DocsStats {pull}27117[#27117] (issue: {issue}27004[#27004]) -* Stats to record how often the ClusterState diff mechanism is used successfully {pull}27107[#27107] (issue: {issue}26973[#26973]) -* Expose adaptive replica selection stats in /_nodes/stats API {pull}27090[#27090] -* Add cgroup memory usage/limit to OS stats on Linux {pull}26166[#26166] -* Add segment attributes to the `_segments` API. {pull}26157[#26157] (issue: {issue}26130[#26130]) - -Suggesters:: -* Improve error message for parse failures of completion fields {pull}27297[#27297] -* Support 'AND' operation for context query in context suggester {pull}24565[#24565] (issue: {issue}24421[#24421]) - -Watcher:: -* Improved error messages when there are no accounts configured for {watcher}. -* Added thread pool rejection information to execution state, which makes it -easier to debug execution failures. -* Added execution state information to watch status details. It is stored in the -`status.execution_state` field. -* Enabled the account monitoring `url` field in the `xpack.notification.jira` -setting to support customized paths. For more information about configuring Jira -accounts for use with watches, see -{stack-ov}/actions-jira.html[Jira Action]. -* Improved handling of exceptions in {watcher} to make it easier to debug -problems. - -[float] -=== Bug Fixes - -Aggregations:: -* Disable the "low cardinality" optimization of terms aggregations. 
{pull}27545[#27545] (issue: {issue}27543[#27543])
-* scripted_metric _agg parameter disappears if params are provided {pull}27159[#27159] (issues: {issue}19768[#19768], {issue}19863[#19863])
-
-Cluster::
-* Properly format IndexGraveyard deletion date as date {pull}27362[#27362]
-* Remove optimisations to reuse objects when applying a new `ClusterState` {pull}27317[#27317]
-
-Core::
-* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572])
-* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565]
-* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521])
-* Protect shard splitting from illegal target shards {pull}27468[#27468] (issue: {issue}26931[#26931])
-* Avoid NPE when getting build information {pull}27442[#27442]
-* Fix `ShardSplittingQuery` to respect nested documents. {pull}27398[#27398] (issue: {issue}27378[#27378])
-* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316])
-
-Engine::
-* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516])
-* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498])
-
-Geo::
-* Correct two equality checks on incomparable types {pull}27688[#27688]
-* [GEO] fix pointsOnly bug for MULTIPOINT {pull}27415[#27415]
-
-Index Templates::
-* Prevent constructing an index template without index patterns {pull}27662[#27662]
-
-Ingest::
-* Add pipeline support for REST API bulk upsert {pull}27075[#27075] (issue: {issue}25601[#25601])
-* Fixing Grok pattern for Apache 2.4 {pull}26635[#26635]
-
-Inner Hits::
-* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531]
-
-Internal::
-* When checking if a key exists in ThreadContextStruct#putHeaders(), the requestHeaders should be put in the map first {pull}26068[#26068]
-* Adding a refresh listener to a recovering shard should be a noop {pull}26055[#26055]
-
-Java High Level REST Client::
-* Register ip_range aggregation with the high level client {pull}26383[#26383]
-* add top hits as a parsed aggregation to the rest high level client {pull}26370[#26370]
-
-Machine Learning::
-* Improved handling of scenarios where there are insufficient values to
-interpolate trend components.
-* Improved calculation of confidence intervals.
-* Fixed degrees of freedom calculation that could lead to excessive error logging.
-* Improved trend modeling with long bucket spans.
-* Fixed timing of when model size statistics are written. Previously, if there
-were multiple partitions, there could be multiple model size stats docs written
-within the same bucket.
-* Updated the calculation of the model memory to include the memory used by
-partition, over, by, or influencer fields.
-* Fixed calculation of the `frequency` property value for {dfeeds} that use
-aggregations. The value must be a multiple of the histogram interval. For more
-information, see
-{stack-ov}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance].
-* Removed unnecessary messages from logs when a job is forcefully closed.
-
-Mapping::
-* Fix dynamic mapping update generation.
{pull}27467[#27467]
-* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323])
-* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189])
-
-Nested Docs::
-* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455]
-* Prevent duplicate fields when mixing parent and root nested includes {pull}27072[#27072] (issue: {issue}26990[#26990])
-
-Network::
-* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927])
-* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540])
-* Do not set SO_LINGER on server channels {pull}26997[#26997]
-* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764])
-* Close TcpTransport on RST in some Spots to Prevent Leaking TIME_WAIT Sockets {pull}26764[#26764] (issue: {issue}26701[#26701])
-
-Packaging::
-* Removes minimum master nodes default number {pull}26803[#26803]
-* setgid on /etc/elasticsearch on package install {pull}26412[#26412] (issue: {issue}26410[#26410])
-
-Percolator::
-* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1])
-
-Plugin Analysis ICU::
-* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202]
-
-Plugin Lang Painless::
-* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760])
-* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210])
-
-Plugin Repository GCS::
-* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092])
-
-Recovery::
-* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536])
-
-Reindex API::
-* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976])
-
-Scroll::
-* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498]
-
-Search::
-* Fix profiling naming issues {pull}27133[#27133]
-* Fix max score tracking with field collapsing {pull}27122[#27122] (issue: {issue}23840[#23840])
-* Apply missing request options to the expand phase {pull}27118[#27118] (issues: {issue}26649[#26649], {issue}27079[#27079])
-* Calculate and cache result when advanceExact is called {pull}26920[#26920] (issue: {issue}26817[#26817])
-* Filter unsupported relation for RangeQueryBuilder {pull}26620[#26620] (issue: {issue}26575[#26575])
-* Handle leniency for phrase query on a field indexed without positions {pull}26388[#26388]
-
-Security::
-* Fixed REST requests that required a body but did not validate it, resulting in
-null pointer exceptions.
-
-Sequence IDs::
-* Obey translog durability in global checkpoint sync {pull}27641[#27641]
-* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841])
-
-Settings::
-* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537])
-
-Snapshot/Restore::
-* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597]
-* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865])
-* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180])
-* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359])
-* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495])
-
-Stats::
-* Include internal refreshes in refresh stats {pull}27615[#27615]
-* Make Segment statistics aware of segments held by internal readers {pull}27558[#27558]
-* Ensure `doc_stats` are changing even if refresh is disabled {pull}27505[#27505]
-
-Watcher::
-* Fixed handling of watcher templates. Missing watcher templates can be added by
-any node if that node has a higher version than the master node.
-
-//[float]
-//=== Regressions
-
-//[float]
-//=== Known Issues
-
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to Jackson 2.8.10 {pull}27230[#27230]
-* Upgrade to Lucene 7.1 {pull}27225[#27225]
-
-Plugin Discovery EC2::
-* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359])
-
-Plugin Discovery GCE::
-* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636])
-
-Plugin Lang Painless::
-* Upgrade Painless from ANTLR 4.5.1-1 to ANTLR 4.5.3. {pull}27153[#27153]
-
 [[release-notes-6.0.1]]
 == {es} version 6.0.1
diff --git a/docs/reference/release-notes/6.1.asciidoc b/docs/reference/release-notes/6.1.asciidoc
new file mode 100644
index 0000000000000..58421df768421
--- /dev/null
+++ b/docs/reference/release-notes/6.1.asciidoc
@@ -0,0 +1,794 @@
+////
+// To add a release, copy and paste the following text, uncomment the relevant
+// sections, and add a link to the new section in the list of releases at the
+// top of the page. Note that release subheads must be floated and sections
+// cannot be empty.
+
+// TEMPLATE
+
+// [[release-notes-n.n.n]]
+// == {es} n.n.n
+
+//[float]
+//[[breaking-n.n.n]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+////
+
+[[release-notes-6.1.4]]
+== {es} version 6.1.4
+
+//[float]
+//[[breaking-6.1.4]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Core::
+* Fix classes that can exit {pull}27518[#27518]
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012])
+
+Core::
+* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797])
+
+Engine::
+* Avoid class cast exception from index writer {pull}28989[#28989]
+* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967])
+
+Scripting::
+* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.3]]
+== {es} version 6.1.3
+
+//[float]
+//[[breaking-6.1.3]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Engine::
+* Replica recovery could go into an endless flushing loop {pull}28350[#28350]
+
+Internal::
+* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213])
+* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169])
+
+Mapping::
+* Allow update of `eager_global_ordinals` on `_parent`. {pull}28014[#28014] (issue: {issue}24407[#24407])
+
+Scripting::
+* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335]
+
+Security::
+* Improved cache expiry handling in the token service. Previously, if the token
+service was idle for more than 60 minutes, the key expired and the service
+failed to generate user tokens.
+
+Settings::
+* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316])
+* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926])
+
+Snapshot/Restore::
+* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974])
+* Do not start snapshots that are deleted during initialization {pull}27931[#27931]
+
+Watcher::
+* Fixed a null pointer exception in the TemplateRegistry when there is no master
+node available.
+* Ensured collections obtained from scripts are protected from self-referencing.
+See https://github.com/elastic/elasticsearch/pull/28335.
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.2]]
+== {es} version 6.1.2
+
+//[float]
+//[[breaking-6.1.2]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Internal::
+* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865])
+
+Java Low Level REST Client::
+* Added new "Request" object flavored request methods. Prefer these instead of
+the multi-argument versions. ({pull}29623[#29623])
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
+* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
+* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])
+
+Engine::
+* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852])
+* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852])
+
+Machine Learning::
+* Fixed the removal of tokens during categorization, where the tokens were
+incorrectly deemed to be hexadecimal numbers. For more information, see
+{stack-ov}/ml-configuring-categories.html[Categorizing log messages].
+* Reduced the sensitivity of the analysis to small perturbations in the input
+data.
+* Disabled the ability to create forecasts for jobs that were created before
+6.1.0.
+
+Monitoring::
+* Added a `cluster_alerts.management.blacklist` setting for HTTP Exporters,
+which you can use to block the creation of specific cluster alerts (example
+below). For more information, see <>.
+
+Network::
+* Only bind loopback addresses when binding to local {pull}28029[#28029]
+
+Recovery::
+* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061])
+
+Search::
+* Use the underlying connection version for CCS connections {pull}28093[#28093]
+* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979])
+
+Security::
+* Fixed an issue in the Active Directory realm when following referrals that
+resulted in an increase in the number of connections made to Active Directory.
+* Fixed an exception that occurred when using auditing and transport clients. In
+particular, the problem occurred when the number of processors on the transport
+client did not match the number of processors on the server.
+* Ensured that TLS is not required to install a license if you are using
+single-node discovery. For more information, see <> and
+{stack-ov}/ssl-tls.html[Setting up TLS on a Cluster].
+* Fixed the <>. In particular, the
+`has_all_requested` field in the API results was not taking cluster privileges
+into consideration.
+
+Snapshot/Restore::
+* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])
+
+Translog::
+* Only sync translog when global checkpoint increased {pull}27973[#27973] (issues: {issue}27837[#27837], {issue}27970[#27970])
+
+Watcher::
+* Fixed encoding of UTF-8 data in the HTTP client.
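A settings sketch for the cluster-alert blacklist flagged above ("example
below"). It assumes an HTTP exporter named `my_http_exporter` is already
configured, and the blocked watch ID is illustrative:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.blacklist": [
      "elasticsearch_nodes"
    ]
  }
}
--------------------------------------------------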
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.1]]
+== {es} version 6.1.1
+
+//[float]
+//[[breaking-6.1.1]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Snapshot/Restore::
+* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723]
+
+Watcher::
+* Ensured the watcher thread pool size is reasonably bounded. In particular, the
+watcher thread pool size is now five times the number of processors, capped at
+50 threads. If more than 50 processors exist, the watcher thread pool size
+grows to match the number of processors. For example, a node with 16 processors
+gets 50 threads (5 × 16 = 80, capped at 50), while a node with 64 processors
+gets 64 threads.
+
+[float]
+=== Bug Fixes
+
+Inner Hits::
+* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822])
+
+Java REST Client::
+* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827])
+
+Monitoring::
+* Data collectors now all share the same cluster state that existed at the
+beginning of data collection. This removes the extremely rare race condition
+where the cluster state can change between some data collectors, which could
+cause temporary issues in the Monitoring UI.
+
+Search::
+* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708])
+
+Sequence IDs::
+* Recovery from snapshot may leave seq# gaps {pull}27850[#27850]
+* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591])
+
+Watcher::
+* Fixed the pagerduty action to send context data. For more information, see
+{stack-ov}/actions-pagerduty.html[PagerDuty Action].
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[float]
+=== Upgrades
+
+Ingest::
+* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824]
+
+[[release-notes-6.1.0]]
+== {es} version 6.1.0
+
+[float]
+[[breaking-6.1.0]]
+=== Breaking Changes
+
+Network::
+* Allow only a fixed-size receive predictor {pull}26165[#26165] (issue: {issue}23185[#23185])
+
+REST::
+* Standardize underscore requirements in parameters {pull}27414[#27414] (issues: {issue}26886[#26886], {issue}27040[#27040])
+
+Scroll::
+* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342] (example below)
+
+Search::
+* Add a limit to from + size in top_hits and inner hits. {pull}26492[#26492] (issue: {issue}11511[#11511])
+
+Security::
+* The `certgen` command now returns validation errors when it encounters problems
+reading from an input file (with the `-in` command option). Previously these
+errors might have been ignored or caused the command to abort with unclear
+messages. For more information, see <>.
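To illustrate the scroll breaking change flagged above ("example below"): a
request like the following, which was previously accepted, is now rejected
because scroll results cannot use the request cache (the index name is
illustrative):

[source,js]
--------------------------------------------------
GET /my-index/_search?scroll=1m&request_cache=true
{
  "query": { "match_all": {} }
}
--------------------------------------------------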
+
+[float]
+=== Breaking Java Changes
+
+Aggregations::
+* Moves deferring code into its own subclass {pull}26421[#26421]
+
+Core::
+* Unify Settings xcontent reading and writing {pull}26739[#26739]
+
+Settings::
+* Return List instead of an array from settings {pull}26903[#26903]
+* Remove `Settings#put(Map)` {pull}26785[#26785]
+
+[float]
+=== Deprecations
+
+Aggregations::
+* Deprecate global_ordinals_hash and global_ordinals_low_cardinality {pull}26173[#26173] (issue: {issue}26014[#26014])
+
+Allocation::
+* Add deprecation warning for negative index.unassigned.node_left.delayed_timeout {pull}26832[#26832] (issue: {issue}26828[#26828])
+
+Analysis::
+* Add limits for ngram and shingle settings {pull}27411[#27411] (issues: {issue}25887[#25887], {issue}27211[#27211])
+
+Geo::
+* [GEO] 6x Deprecate ShapeBuilders and decouple geojson parse logic {pull}27345[#27345]
+
+Mapping::
+* Deprecate the `index_options` parameter for numeric fields {pull}26672[#26672] (issue: {issue}21475[#21475])
+
+Plugin Repository Azure::
+* Azure repository: Move to named configurations as we do for S3 repository and secure settings {pull}23405[#23405] (issues: {issue}22762[#22762], {issue}22763[#22763])
+
+Search::
+* doc: deprecate _primary and _replica shard option {pull}26792[#26792] (issue: {issue}26335[#26335])
+
+[float]
+=== New Features
+
+Aggregations::
+* Aggregations: bucket_sort pipeline aggregation {pull}27152[#27152] (issue: {issue}14928[#14928])
+* Add composite aggregator {pull}26800[#26800]
+
+Analysis::
+* Added Bengali Analyzer to Elasticsearch with respect to the lucene update {pull}26527[#26527]
+
+Ingest::
+* add URL-Decode Processor to Ingest {pull}26045[#26045] (issue: {issue}25837[#25837])
+
+Java High Level REST Client::
+* Added Delete Index support to high-level REST client {pull}27019[#27019] (issue: {issue}25847[#25847])
+
+Machine Learning::
+* Added the ability to create job forecasts. This feature enables you to use
+historical behavior to predict the future behavior of your time series. You can
+create forecasts in {kib} or by using the <> API.
++
+--
+NOTE: You cannot create forecasts for jobs that were created in previous
+versions; this functionality is available only for jobs created in 6.1 or later.
+
+--
+* Added overall buckets, which summarize bucket results for multiple jobs.
+For more information, see the <> API.
+* Added job groups, which you can use to manage or retrieve information from
+multiple jobs at once. Also updated many {ml} APIs to support groups and
+wildcard expressions in the job identifier.
+
+Nested Docs::
+* Multi-level Nested Sort with Filters {pull}26395[#26395]
+
+Query DSL::
+* Add terms_set query {pull}27145[#27145] (issue: {issue}26915[#26915])
+* Introduce sorted_after query for sorted index {pull}26377[#26377]
+* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}26097[#26097]
+
+Search::
+* Expose `fuzzy_transpositions` parameter in fuzzy queries {pull}26870[#26870] (issue: {issue}18348[#18348])
+* Add upper limit for scroll expiry {pull}26448[#26448] (issues: {issue}11511[#11511], {issue}23268[#23268])
+* Implement adaptive replica selection {pull}26128[#26128] (issue: {issue}24915[#24915])
+* configure distance limit {pull}25731[#25731] (issue: {issue}25528[#25528])
+
+Similarities::
+* Add a scripted similarity. {pull}25831[#25831]
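+
+A hedged sketch of what registering a scripted similarity can look like. The
+index name, the `scripted_tfidf` key, and the toy weighting script below are
+illustrative assumptions, not taken from the change itself; `doc.freq` and
+`query.boost` follow the scripted-similarity script variables.
+
+[source,java]
+----
+import java.util.Collections;
+import org.apache.http.HttpHost;
+import org.apache.http.entity.ContentType;
+import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class ScriptedSimilaritySketch {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+            // Create an index whose `title` field is scored by a scripted similarity.
+            String body =
+                "{\"settings\":{\"index\":{\"similarity\":{\"scripted_tfidf\":{"
+                    + "\"type\":\"scripted\","
+                    + "\"script\":{\"source\":"
+                    + "\"double tf = Math.sqrt(doc.freq); return query.boost * tf;\"}}}}},"
+                    + "\"mappings\":{\"doc\":{\"properties\":{"
+                    + "\"title\":{\"type\":\"text\",\"similarity\":\"scripted_tfidf\"}}}}}";
+            Response response = client.performRequest("PUT", "/example-index",
+                Collections.emptyMap(), new NStringEntity(body, ContentType.APPLICATION_JSON));
+            System.out.println(response.getStatusLine());
+        }
+    }
+}
+----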
+
+Suggesters::
+* Expose duplicate removal in the completion suggester {pull}26496[#26496] (issue: {issue}23364[#23364])
+* Support must and should for context query in context suggester {pull}26407[#26407] (issues: {issue}24421[#24421], {issue}24565[#24565])
+
+[float]
+=== Enhancements
+
+Aggregations::
+* Allow aggregation sorting via nested aggregation {pull}26683[#26683] (issue: {issue}16838[#16838])
+
+Allocation::
+* Tie-break shard path decision based on total number of shards on path {pull}27039[#27039] (issue: {issue}26654[#26654])
+* Balance shards for an index more evenly across multiple data paths {pull}26654[#26654] (issue: {issue}16763[#16763])
+* Expand "NO" decision message in NodeVersionAllocationDecider {pull}26542[#26542] (issue: {issue}10403[#10403])
+* _reroute's retry_failed flag should reset failure counter {pull}25888[#25888] (issue: {issue}25291[#25291])
+
+Analysis::
+* Add configurable `max_token_length` parameter to whitespace tokenizer {pull}26749[#26749] (issue: {issue}26643[#26643])
+
+CRUD::
+* Add wait_for_active_shards parameter to index open command {pull}26682[#26682] (issue: {issue}20937[#20937])
+
+Core::
+* Fix classes that can exit {pull}27518[#27518]
+* Replace empty index block checks with global block checks in template delete/put actions {pull}27050[#27050] (issue: {issue}10530[#10530])
+* Allow Uid#decodeId to decode from a byte array slice {pull}26987[#26987] (issue: {issue}26931[#26931])
+* Use separate searchers for "search visibility" vs "move indexing buffer to disk" {pull}26972[#26972] (issues: {issue}15768[#15768], {issue}26802[#26802], {issue}26912[#26912], {issue}3593[#3593])
+* Add ability to split shards {pull}26931[#26931]
+* Make circuit breaker mutations debuggable {pull}26067[#26067] (issue: {issue}25891[#25891])
+
+Dates::
+* DateProcessor Locale {pull}26186[#26186] (issue: {issue}25513[#25513])
+
+Discovery::
+* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328])
+
+Engine::
+* Ensure external refreshes will also refresh internal searcher to minimize segment creation {pull}27253[#27253] (issue: {issue}26972[#26972])
+* Move IndexShard#getWritingBytes() under InternalEngine {pull}27209[#27209] (issue: {issue}26972[#26972])
+* Refactor internal engine {pull}27082[#27082]
+
+Geo::
+* Add ignore_malformed to geo_shape fields {pull}24654[#24654] (issue: {issue}23747[#23747])
+
+Ingest::
+* add json-processor support for non-map json types {pull}27335[#27335] (issue: {issue}25972[#25972])
+* Introduce templating support to timezone/locale in DateProcessor {pull}27089[#27089] (issue: {issue}24024[#24024])
+* Add support for parsing inline script (#23824) {pull}26846[#26846] (issue: {issue}23824[#23824])
+* Consolidate locale parsing. 
{pull}26400[#26400] +* Accept ingest simulate params as ints or strings {pull}23885[#23885] (issue: {issue}23823[#23823]) + +Internal:: +* Avoid uid creation in ParsedDocument {pull}27241[#27241] +* Upgrade to Lucene 7.1.0 snapshot version {pull}26864[#26864] (issue: {issue}26527[#26527]) +* Remove `_index` fielddata hack if cluster alias is present {pull}26082[#26082] (issue: {issue}25885[#25885]) + +Java High Level REST Client:: +* Adjust RestHighLevelClient method modifiers {pull}27238[#27238] +* Decouple BulkProcessor from ThreadPool {pull}26727[#26727] (issue: {issue}26028[#26028]) + +Logging:: +* Add more information on _failed_to_convert_ exception (#21946) {pull}27034[#27034] (issue: {issue}21946[#21946]) +* Improve shard-failed log messages. {pull}26866[#26866] + +Machine Learning:: +* Improved the way {ml} jobs are allocated to nodes, such that it is primarily +determined by the estimated memory requirement of the job. If there is insufficient +information about the job's memory requirements, the allocation decision is based +on job counts per node. +* Increased the default value of the `xpack.ml.max_open_jobs` setting from `10` +to `20`. The allocation of jobs to nodes now considers memory usage as well as +job counts, so it's reasonable to permit more small jobs on a single node. For +more information, see <>. +* Decreased the default `model_memory_limit` property value to 1 GB for new jobs. +If you want to create a job that analyzes high cardinality fields, you can +increase this property value. For more information, see <>. +* Improved analytics related to decay rates when predictions are very accurate. +* Improved analytics related to detecting non-negative quantities and using this +information to constrain analysis, predictions, and confidence intervals. +* Improved periodic trough or spike detection. +* Improved the speed of the aggregation of {ml} results. +* Improved probability calculation performance. +* Expedited bucket processing time in very large populations by determining when +there are nearly duplicate values in a bucket and de-duplicating the samples that +are added to the model. +* Improved handling of periodically missing values. +* Improved analytics related to diurnal periodicity. +* Reduced memory usage during population analysis by releasing redundant memory +after the bucket results are written. +* Improved modeling of long periodic components, particularly when there is a +long bucket span. + +Mapping:: +* Allow ip_range to accept CIDR notation {pull}27192[#27192] (issue: {issue}26260[#26260]) +* Deduplicate `_field_names`. {pull}26550[#26550] +* Throw a better error message for empty field names {pull}26543[#26543] (issue: {issue}23348[#23348]) +* Stricter validation for min/max values for whole numbers {pull}26137[#26137] +* Make FieldMapper.copyTo() always non-null. {pull}25994[#25994] + +Monitoring:: +* Added the new `interval_ms` field to monitoring documents. This field +indicates the current collection interval for {es} or external monitored systems. 
+ +Nested Docs:: +* Use the primary_term field to identify parent documents {pull}27469[#27469] (issue: {issue}24362[#24362]) +* Prohibit using `nested_filter`, `nested_path` and new `nested` Option at the same time in FieldSortBuilder {pull}26490[#26490] (issue: {issue}17286[#17286]) + +Network:: +* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) +* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) +* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) + +Percolator:: +* Use Lucene's CoveringQuery to select percolate candidate matches {pull}27271[#27271] (issues: {issue}26081[#26081], {issue}26307[#26307]) +* Add support to percolate query to percolate multiple documents simultaneously {pull}26418[#26418] +* Hint what clauses are important in a conjunction query based on fields {pull}26081[#26081] +* Add support for selecting percolator query candidate matches containing range queries {pull}25647[#25647] (issue: {issue}21040[#21040]) + +Plugin Discovery EC2:: +* update AWS SDK for ECS Task IAM support in discovery-ec2 {pull}26479[#26479] (issue: {issue}23039[#23039]) + +Plugin Lang Painless:: +* Painless: Only allow Painless type names to be the same as the equivalent Java class. {pull}27264[#27264] +* Allow for the Painless Definition to have multiple instances for white-listing {pull}27096[#27096] +* Separate Painless Whitelist Loading from the Painless Definition {pull}26540[#26540] +* Remove Sort enum from Painless Definition {pull}26179[#26179] + +Plugin Repository Azure:: +* Add azure storage endpoint suffix #26432 {pull}26568[#26568] (issue: {issue}26432[#26432]) +* Support for accessing Azure repositories through a proxy {pull}23518[#23518] (issues: {issue}23506[#23506], {issue}23517[#23517]) + +Plugin Repository S3:: +* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) +* Update to AWS SDK 1.11.223 {pull}27278[#27278] + +Plugins:: +* Plugins: Add versionless alias to all security policy codebase properties {pull}26756[#26756] (issue: {issue}26521[#26521]) +* Allow plugins to plug rescore implementations {pull}26368[#26368] (issue: {issue}26208[#26208]) + +Query DSL:: +* Add support for wildcard on `_index` {pull}27334[#27334] (issue: {issue}25722[#25722]) + +Reindex API:: +* Update by Query is modified to accept short `script` parameter. 
{pull}26841[#26841] (issue: {issue}24898[#24898]) +* reindex: automatically choose the number of slices {pull}26030[#26030] (issues: {issue}24547[#24547], {issue}25582[#25582]) + +Rollover:: +* Add size-based condition to the index rollover API {pull}27160[#27160] (issue: {issue}27004[#27004]) +* Add size-based condition to the index rollover API {pull}27115[#27115] (issue: {issue}27004[#27004]) + +Scripting:: +* Script: Convert script query to a dedicated script context {pull}26003[#26003] + +Search:: +* Make fields optional in multi_match query and rely on index.query.default_field by default {pull}27380[#27380] +* fix unnecessary logger creation {pull}27349[#27349] +* `ObjectParser` : replace `IllegalStateException` with `ParsingException` {pull}27302[#27302] (issue: {issue}27147[#27147]) +* Uses norms for exists query if enabled {pull}27237[#27237] +* Cross Cluster Search: make remote clusters optional {pull}27182[#27182] (issues: {issue}26118[#26118], {issue}27161[#27161]) +* Enhances exists queries to reduce need for `_field_names` {pull}26930[#26930] (issue: {issue}26770[#26770]) +* Change ParentFieldSubFetchPhase to create doc values iterator once per segment {pull}26815[#26815] +* Change VersionFetchSubPhase to create doc values iterator once per segment {pull}26809[#26809] +* Change ScriptFieldsFetchSubPhase to create search scripts once per segment {pull}26808[#26808] (issue: {issue}26775[#26775]) +* Make sure SortBuilders rewrite inner nested sorts {pull}26532[#26532] +* Extend testing of build method in ScriptSortBuilder {pull}26520[#26520] (issues: {issue}17286[#17286], {issue}26490[#26490]) +* Accept an array of field names and boosts in the index.query.default_field setting {pull}26320[#26320] (issue: {issue}25946[#25946]) +* Reject IPv6-mapped IPv4 addresses when using the CIDR notation. {pull}26254[#26254] (issue: {issue}26078[#26078]) +* Rewrite range queries with open bounds to exists query {pull}26160[#26160] (issue: {issue}22640[#22640]) + +Security:: +* Added the `manage_index_templates` cluster privilege to the built-in role +`kibana_system`. For more information, see +{stack-ov}/security-privileges.html#privileges-list-cluster[Cluster Privileges] +and {stack-ov}/built-in-roles.html[Built-in Roles]. +* Newly created or updated watches execute with the privileges of the user that +last modified the watch. +* Added log messages when a PEM key is found when a PEM certificate was +expected (or vice versa) in the `xpack.ssl.key` or `xpack.ssl.certificate` settings. +* Added the new `certutil` command to simplify the creation of certificates for +use with the Elastic stack. For more information, see <>. +* Added automatic detection of support for AES 256 bit TLS ciphers and enabled +their use when the JVM supports them. 
+ +Sequence IDs:: +* Only fsync global checkpoint if needed {pull}27652[#27652] +* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) +* Lazy initialize checkpoint tracker bit sets {pull}27179[#27179] (issue: {issue}10708[#10708]) +* Returns the current primary_term for Get/MultiGet requests {pull}27177[#27177] (issue: {issue}26493[#26493]) + +Settings:: +* Allow affix settings to specify dependencies {pull}27161[#27161] +* Represent lists as actual lists inside Settings {pull}26878[#26878] (issue: {issue}26723[#26723]) +* Remove Settings#getAsMap() {pull}26845[#26845] +* Replace group map settings with affix setting {pull}26819[#26819] +* Throw exception if setting isn't recognized {pull}26569[#26569] (issue: {issue}25607[#25607]) +* Settings: Move keystore creation to plugin installation {pull}26329[#26329] (issue: {issue}26309[#26309]) + +Snapshot/Restore:: +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] +* Snapshot: Migrate TransportRequestHandler to TransportMasterNodeAction {pull}27165[#27165] (issue: {issue}27151[#27151]) +* Fix toString of class SnapshotStatus (#26851) {pull}26852[#26852] (issue: {issue}26851[#26851]) + +Stats:: +* Adds average document size to DocsStats {pull}27117[#27117] (issue: {issue}27004[#27004]) +* Stats to record how often the ClusterState diff mechanism is used successfully {pull}27107[#27107] (issue: {issue}26973[#26973]) +* Expose adaptive replica selection stats in /_nodes/stats API {pull}27090[#27090] +* Add cgroup memory usage/limit to OS stats on Linux {pull}26166[#26166] +* Add segment attributes to the `_segments` API. {pull}26157[#26157] (issue: {issue}26130[#26130]) + +Suggesters:: +* Improve error message for parse failures of completion fields {pull}27297[#27297] +* Support 'AND' operation for context query in context suggester {pull}24565[#24565] (issue: {issue}24421[#24421]) + +Watcher:: +* Improved error messages when there are no accounts configured for {watcher}. +* Added thread pool rejection information to execution state, which makes it +easier to debug execution failures. +* Added execution state information to watch status details. It is stored in the +`status.execution_state` field. +* Enabled the account monitoring `url` field in the `xpack.notification.jira` +setting to support customized paths. For more information about configuring Jira +accounts for use with watches, see +{stack-ov}/actions-jira.html[Jira Action]. +* Improved handling of exceptions in {watcher} to make it easier to debug +problems. + +[float] +=== Bug Fixes + +Aggregations:: +* Disable the "low cardinality" optimization of terms aggregations. 
{pull}27545[#27545] (issue: {issue}27543[#27543])
+* scripted_metric _agg parameter disappears if params are provided {pull}27159[#27159] (issues: {issue}19768[#19768], {issue}19863[#19863])
+
+Cluster::
+* Properly format IndexGraveyard deletion date as date {pull}27362[#27362]
+* Remove optimisations to reuse objects when applying a new `ClusterState` {pull}27317[#27317]
+
+Core::
+* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572])
+* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565]
+* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521])
+* Protect shard splitting from illegal target shards {pull}27468[#27468] (issue: {issue}26931[#26931])
+* Avoid NPE when getting build information {pull}27442[#27442]
+* Fix `ShardSplittingQuery` to respect nested documents. {pull}27398[#27398] (issue: {issue}27378[#27378])
+* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316])
+
+Engine::
+* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516])
+* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498])
+
+Geo::
+* Correct two equality checks on incomparable types {pull}27688[#27688]
+* [GEO] fix pointsOnly bug for MULTIPOINT {pull}27415[#27415]
+
+Index Templates::
+* Prevent constructing an index template without index patterns {pull}27662[#27662]
+
+Ingest::
+* Add pipeline support for REST API bulk upsert {pull}27075[#27075] (issue: {issue}25601[#25601])
+* Fixing Grok pattern for Apache 2.4 {pull}26635[#26635]
+
+Inner Hits::
+* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531]
+
+Internal::
+* When checking if key exists in ThreadContextStruct#putHeaders() method, should put requestHeaders in map first {pull}26068[#26068]
+* Adding a refresh listener to a recovering shard should be a noop {pull}26055[#26055]
+
+Java High Level REST Client::
+* Register ip_range aggregation with the high level client {pull}26383[#26383]
+* add top hits as a parsed aggregation to the rest high level client {pull}26370[#26370]
+
+Machine Learning::
+* Improved handling of scenarios where there are insufficient values to
+interpolate trend components.
+* Improved calculation of confidence intervals.
+* Fixed degrees of freedom calculation that could lead to excessive error logging.
+* Improved trend modeling with long bucket spans.
+* Fixed timing of when model size statistics are written. Previously, if there
+were multiple partitions, there could be multiple model size stats docs written
+within the same bucket.
+* Updated the calculation of the model memory to include the memory used by
+partition, over, by, or influencer fields.
+* Fixed calculation of the `frequency` property value for {dfeeds} that use
+aggregations. The value must be a multiple of the histogram interval. For more
+information, see
+{stack-ov}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance].
+* Removed unnecessary messages from logs when a job is forcefully closed.
+
+Mapping::
+* Fix dynamic mapping update generation. 
{pull}27467[#27467]
+* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323])
+* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189])
+
+Nested Docs::
+* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455]
+* Prevent duplicate fields when mixing parent and root nested includes {pull}27072[#27072] (issue: {issue}26990[#26990])
+
+Network::
+* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927])
+* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540])
+* Do not set SO_LINGER on server channels {pull}26997[#26997]
+* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764])
+* Close TcpTransport on RST in some Spots to Prevent Leaking TIME_WAIT Sockets {pull}26764[#26764] (issue: {issue}26701[#26701])
+
+Packaging::
+* Removes minimum master nodes default number {pull}26803[#26803]
+* setgid on /etc/elasticsearch on package install {pull}26412[#26412] (issue: {issue}26410[#26410])
+
+Percolator::
+* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1])
+
+Plugin Analysis ICU::
+* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202]
+
+Plugin Lang Painless::
+* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760])
+* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210])
+
+Plugin Repository GCS::
+* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092])
+
+Recovery::
+* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536])
+
+Reindex API::
+* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976])
+
+Scroll::
+* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498]
+
+Search::
+* Fix profiling naming issues {pull}27133[#27133]
+* Fix max score tracking with field collapsing {pull}27122[#27122] (issue: {issue}23840[#23840])
+* Apply missing request options to the expand phase {pull}27118[#27118] (issues: {issue}26649[#26649], {issue}27079[#27079])
+* Calculate and cache result when advanceExact is called {pull}26920[#26920] (issue: {issue}26817[#26817])
+* Filter unsupported relation for RangeQueryBuilder {pull}26620[#26620] (issue: {issue}26575[#26575])
+* Handle leniency for phrase query on a field indexed without positions {pull}26388[#26388]
+
+Security::
+* Fixed REST requests that required a body but did not validate it, resulting in
+null pointer exceptions. 
+
+Sequence IDs::
+* Obey translog durability in global checkpoint sync {pull}27641[#27641]
+* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841])
+
+Settings::
+* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537])
+
+Snapshot/Restore::
+* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597]
+* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865])
+* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180])
+* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359])
+* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495])
+
+Stats::
+* Include internal refreshes in refresh stats {pull}27615[#27615]
+* Make Segment statistics aware of segments hold by internal readers {pull}27558[#27558]
+* Ensure `doc_stats` are changing even if refresh is disabled {pull}27505[#27505]
+
+Watcher::
+* Fixed handling of watcher templates. Missing watcher templates can be added by
+any node if that node has a higher version than the master node.
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[float]
+=== Upgrades
+
+Core::
+* Upgrade to Jackson 2.8.10 {pull}27230[#27230]
+* Upgrade to Lucene 7.1 {pull}27225[#27225]
+
+Plugin Discovery EC2::
+* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359])
+
+Plugin Discovery GCE::
+* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636])
+
+Plugin Lang Painless::
+* Upgrade Painless from ANTLR 4.5.1-1 to ANTLR 4.5.3. {pull}27153[#27153]
diff --git a/docs/reference/release-notes/6.2.asciidoc b/docs/reference/release-notes/6.2.asciidoc
new file mode 100644
index 0000000000000..14be7e016c9b0
--- /dev/null
+++ b/docs/reference/release-notes/6.2.asciidoc
@@ -0,0 +1,812 @@
+////
+// To add a release, copy and paste the following text, uncomment the relevant
+// sections, and add a link to the new section in the list of releases at the
+// top of the page. Note that release subheads must be floated and sections
+// cannot be empty.
+// TEMPLATE
+
+// [[release-notes-n.n.n]]
+// == {es} n.n.n
+
+//[float]
+[[breaking-n.n.n]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+////
+
+[[release-notes-6.2.4]]
+== {es} version 6.2.4
+
+//[float]
+//[[breaking-6.2.4]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641])
+
+Engine::
+* Harden periodically check to avoid endless flush loop {pull}29125[#29125] (issues: {issue}28350[#28350], {issue}29097[#29097])
+
+Ingest::
+* Don't allow referencing the pattern bank name in the pattern bank {pull}29295[#29295] (issue: {issue}29257[#29257])
+
+Java High Level REST Client::
+* Bulk processor#awaitClose to close scheduler {pull}29263[#29263]
+
+Java Low Level REST Client::
+* REST client: hosts marked dead for the first time should not be immediately retried {pull}29230[#29230]
+
+Machine Learning::
+* Prevents failed jobs from mistakenly re-opening after node loss recovery.
+* Returns an error when an operation cannot be submitted because the process was
+killed.
+* Respects the datafeed frequency when it is less than or equal to the
+`query_delay` setting.
+
+Network::
+* Cross-cluster search and default connections can get crossed {issue}29321[#29321]
+
+Percolator::
+* Fixed bug when non percolator docs end up in the search hits {pull}29447[#29447] (issue: {issue}29429[#29429])
+* Fixed a msm accounting error that can occur during analyzing a percolator query {pull}29415[#29415] (issue: {issue}29393[#29393])
+* Fix more query extraction bugs. {pull}29388[#29388] (issues: {issue}28353[#28353], {issue}29376[#29376])
+* Fix some query extraction bugs. {pull}29283[#29283]
+
+Plugins::
+* Plugins: Fix native controller confirmation for non-meta plugin {pull}29434[#29434]
+
+Search::
+* Propagate ignore_unmapped to inner_hits {pull}29261[#29261] (issue: {issue}29071[#29071])
+
+Security/Authentication::
+* Adds missing `idp.use_single_logout` and `populate_user_metadata` SAML realm
+settings. See <>.
+
+Settings::
+* Archive unknown or invalid settings on updates {pull}28888[#28888] (issue: {issue}28609[#28609])
+
+Snapshot/Restore::
+* Fail snapshot operations early when creating or deleting a snapshot on a
+repository that has been written to by an older Elasticsearch after writing to
+it with a newer Elasticsearch version. ({pull}30140[#30140])
+* Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332])
+
+Watcher::
+* Re-enables `smtp.*` account configuration properties in the notification
+settings. See <>.
+* Ensures starting and stopping {watcher} is properly acknowledged as a master
+node action.
+* Refrains from appending a question mark to an HTTP request if no parameters
+are used.
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.3]]
+== {es} version 6.2.3
+
+//[float]
+//[[breaking-6.2.3]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+[float]
+=== Deprecations
+
+Deprecated multi-argument versions of the request methods in the RestClient.
+Prefer the "Request" object flavored methods. ({pull}30315[#30315])
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Highlighting::
+* Limit analyzed text for highlighting (improvements) {pull}28808[#28808] (issues: {issue}16764[#16764], {issue}27934[#27934])
+
+Recovery::
+* Require translogUUID when reading global checkpoint {pull}28587[#28587] (issue: {issue}28435[#28435])
+
+{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow
+copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404])
+
+Watcher HTTP client used in watches now allows more parallel connections to the
+same endpoint and evicts long running connections. ({pull}30130[#30130])
+
+Added new "Request" object flavored request methods in the RestClient. Prefer
+these instead of the multi-argument versions. ({pull}29623[#29623])
+
+Added `setJsonEntity` to `Request` object so it is marginally easier to send
+JSON. ({pull}30447[#30447])
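+
+A minimal sketch of the new request style described in the two entries above.
+The endpoint, parameter, and body below are arbitrary placeholders, not part of
+the change itself.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class RequestStyleSketch {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+            // Build the request once, then hand it to performRequest.
+            Request request = new Request("PUT", "/example-index/_doc/1");
+            request.addParameter("refresh", "true");         // query parameters are set explicitly
+            request.setJsonEntity("{\"field\":\"value\"}");  // sets the body and its JSON content type
+            Response response = client.performRequest(request);
+            System.out.println(response.getStatusLine());
+        }
+    }
+}
+----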
+
+The cluster state listener to decide if watcher should be
+stopped/started/paused now runs far less code in an executor but is more
+synchronous and predictable. Also the trigger engine thread is only started on
+data nodes. And the Execute Watch API can be triggered regardless of whether
+watcher is started or stopped. ({pull}30118[#30118])
+
+Added put index template API to the high level rest client ({pull}30400[#30400])
+
+Add ability to filter coordinating-only nodes when interacting with cluster
+APIs. ({pull}30313[#30313])
+
+[float]
+=== Bug Fixes
+
+Core::
+* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797])
+
+Engine::
+* Avoid class cast exception from index writer {pull}28989[#28989]
+* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967])
+* Never block on key in `LiveVersionMap#pruneTombstones` {pull}28736[#28736] (issue: {issue}28714[#28714])
+
+Ingest::
+* Continue registering pipelines after one pipeline parse failure. {pull}28752[#28752] (issue: {issue}28269[#28269])
+
+Java High Level REST Client::
+* REST high-level client: encode path parts {pull}28663[#28663] (issue: {issue}28625[#28625])
+
+Machine Learning::
+* Fixed the <> such that it
+returns only machine learning-specific node attributes.
+
+Monitoring::
+* Aligned reporting of index statistics that exist in the current cluster state.
+This fix avoids subtle race conditions in stats reporting.
+
+Packaging::
+* Delay path expansion on Windows {pull}28753[#28753] (issues: {issue}27675[#27675], {issue}28748[#28748])
+
+Percolator::
+* Fix percolator query analysis for function_score query {pull}28854[#28854]
+* Improved percolator's random candidate query duel test {pull}28840[#28840]
+
+Security::
+* Fixed handling of comments in XML documents [ESA-2018-07].
+* Fixed auditing such that when you use a local audit index, it maintains the
+mappings automatically. Maintenance is necessary, for example, when new fields
+are introduced or document types change.
+* Added and changed settings for the SAML NameID policy. For example, added the
+`nameid.allow_create` setting and changed the default value for
+the SPNameQualifier setting to blank. See {stack-ov}/saml-realm.html[SAML Authentication].
+* Fixed handling of an Assertion Consumer Service (ACS) URL with existing query
+parameters. See {stack-ov}/saml-realm.html[SAML Authentication].
+* Fixed the PKI realm bootstrap check such that it works with secure settings.
+For more information, see <>.
+
+Snapshot/Restore::
+* Fix NPE when using deprecated Azure settings {pull}28769[#28769] (issues: {issue}23518[#23518], {issue}28299[#28299])
+
+Stats::
+* Fix AdaptiveSelectionStats serialization bug {pull}28718[#28718] (issue: {issue}28713[#28713])
+
+Watcher::
+* Fixed the serialization of failed hipchat messages, such that it no longer
+tries to write the status field twice.
+* Fixed TransformInput toXContent serialization errors. For more information,
+see
+{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data].
+
+Allocation::
+* Auto-expand replicas when adding or removing nodes to prevent shard copies
+from being dropped and resynced when a data node rejoins the cluster
+({pull}30423[#30423])
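+
+For context, the fix above concerns indices that use the
+`index.auto_expand_replicas` setting. A hedged sketch of enabling it on an
+existing index follows; the index name is illustrative, and `client` is the
+low-level RestClient from the sketch earlier in these notes.
+
+[source,java]
+----
+// Sketch: let the index grow from 0 to 1 replicas as data nodes join or leave.
+Request request = new Request("PUT", "/example-index/_settings");
+request.setJsonEntity("{\"index\":{\"auto_expand_replicas\":\"0-1\"}}");
+Response response = client.performRequest(request);
+----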
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.2]]
+== {es} version 6.2.2
+
+//[float]
+//[[breaking-6.2.2]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Recovery::
+* Synced-flush should not seal index of out of sync replicas {pull}28464[#28464] (issue: {issue}10032[#10032])
+
+[float]
+=== Bug Fixes
+
+Core::
+* Handle throws on tasks submitted to thread pools {pull}28667[#28667]
+* Fix size blocking queue to not lie about its weight {pull}28557[#28557] (issue: {issue}28547[#28547])
+
+Ingest::
+* Guard accessDeclaredMembers for Tika on JDK 10 {pull}28603[#28603] (issue: {issue}28602[#28602])
+* Fix bug that prevented pipelines that use stored scripts from loading after a restart {pull}28588[#28588]
+
+Java High Level REST Client::
+* Fix parsing of script fields {pull}28395[#28395] (issue: {issue}28380[#28380])
+* Move to POST when calling APIs that support a request body {pull}28342[#28342] (issue: {issue}28326[#28326])
+
+Machine Learning::
+* Fixed an exception that occurred when a categorization field contained an
+empty string.
+
+Monitoring::
+* Properly registered `xpack.monitoring.exporters.*.headers.*` settings, which
+were broken in 6.2.0 and 6.2.1. For more information, see
+<>.
+
+Packaging::
+* Fix using relative custom config path {pull}28700[#28700] (issue: {issue}27610[#27610])
+* Disable console logging in the Windows service {pull}28618[#28618] (issue: {issue}20422[#20422])
+
+Percolator::
+* Do not take duplicate query extractions into account for minimum_should_match attribute {pull}28353[#28353] (issue: {issue}28315[#28315])
+
+Recovery::
+* Fsync directory after cleanup {pull}28604[#28604] (issue: {issue}28435[#28435])
+
+Security::
+* Added CachingRealm to published artifacts so it can be used in custom realm
+extensions.
+* If the realm uses native role mappings and the security index health changes,
+the realm caches are cleared. For example, they are cleared when the index
+recovers from a red state, when the index is deleted, when the index becomes
+outdated, and when the index becomes up-to-date.
+* Fixed a bug that could prevent auditing to a remote index if the remote
+cluster was re-started at the same time as the audited cluster.
+* Removed AuthorityKeyIdentifier's Issuer and Serial number from certificates
+generated by `certgen` and `certutil`. This improves compatibility with
+certificate verification in {kib}.
+
+Watcher::
+* Proxies now use HTTP by default, which was the default prior to 6.0. This
+fixes issues with HTTPS requests that tried to access proxies via HTTP.
+* Fixed the HTML sanitizer settings
+(`xpack.notification.email.html.sanitization.*`), which were broken in 6.2. For
+more information, see <>.
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.1]]
+== {es} version 6.2.1
+
+//[float]
+//[[breaking-6.2.1]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Plugin Lang Painless::
+* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501])
+
+Plugins::
+* Fix the ability to remove old plugin {pull}28540[#28540] (issue: {issue}28538[#28538])
+
+Security::
+* Fixed missing dependencies for x-pack-transport.
+* Fixed `saml-metadata` env file such that it sources the appropriate
+environment file.
+
+Machine Learning::
+* Account for gaps in data counts after job is reopened ({pull}30294[#30294])
+
+[[release-notes-6.2.0]]
+== {es} version 6.2.0
+
+[float]
+[[breaking-6.2.0]]
+=== Breaking Changes
+
+Aggregations::
+* Add a new cluster setting to limit the total number of buckets returned by a request {pull}27581[#27581] (issues: {issue}26012[#26012], {issue}27452[#27452])
+
+Core::
+* Forbid granting the all permission in production {pull}27548[#27548]
+
+Highlighting::
+* Limit the analyzed text for highlighting {pull}27934[#27934] (issue: {issue}27517[#27517])
+
+Rollover::
+* Fail rollover if duplicated alias found in templates {pull}28110[#28110] (issue: {issue}26976[#26976])
+
+Search::
+* Introduce limit to the number of terms in Terms Query {pull}27968[#27968] (issue: {issue}18829[#18829])
+
+[float]
+=== Breaking Java Changes
+
+Java API::
+* Remove `operationThreaded` from Java API {pull}27836[#27836]
+
+Java High Level REST Client::
+* REST high-level client: remove index suffix from indices client method names {pull}28263[#28263]
+
+[float]
+=== Deprecations
+
+Analysis::
+* Backport delimited payload filter renaming {pull}27535[#27535] (issue: {issue}26625[#26625])
+
+Suggesters::
+* deprecating `jarowinkler` in favor of `jaro_winkler` {pull}27526[#27526]
+* Deprecating `levenstein` in favor of `levenshtein` {pull}27409[#27409] (issue: {issue}27325[#27325])
+
+[float]
+=== New Features
+
+Machine Learning::
+* Added the ability to identify scheduled events and prevent anomaly detection
+during these periods. For more information, see
+{stack-ov}/ml-calendars.html[Calendars and Scheduled Events].
+
+Plugin Ingest GeoIp::
+* Enable ASN support for Ingest GeoIP plugin. {pull}27958[#27958] (issue: {issue}27849[#27849])
+
+Plugin Lang Painless::
+* Painless: Add spi jar that will be published for extending whitelists {pull}28302[#28302]
+* Painless: Add a simple cache for whitelist methods and fields. {pull}28142[#28142]
+
+Plugins::
+* Add the ability to bundle multiple plugins into a meta plugin {pull}28022[#28022] (issue: {issue}27316[#27316])
+
+Rank Evaluation::
+* Backport of ranking evaluation API (#27478) {pull}27844[#27844] (issue: {issue}27478[#27478])
+
+Recovery::
+* Backport for using lastSyncedGlobalCheckpoint in deletion policy {pull}27866[#27866] (issue: {issue}27826[#27826])
+
+Reindex API::
+* Add scroll parameter to _reindex API {pull}28041[#28041] (issue: {issue}27555[#27555]); a usage
+sketch follows at the end of this section
+
+Security::
+* {security} now supports user authentication using SAML Single Sign on. For
+more information, see {stack-ov}/saml-realm.html[SAML authentication].
+
+Watcher::
+* Added a transform input for chained input. For more information, see
+{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data].
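+
+A hedged sketch of the new `scroll` parameter on `_reindex`. The index names
+and the `10m` value are illustrative; for brevity it reuses the `Request`
+helpers and `client` from the 6.2.3 sketches above, although in 6.2 itself the
+multi-argument `performRequest` overloads apply.
+
+[source,java]
+----
+// Sketch: reindex with a custom scroll timeout instead of the default.
+Request request = new Request("POST", "/_reindex");
+request.addParameter("scroll", "10m");
+request.setJsonEntity(
+    "{\"source\":{\"index\":\"source-index\"},\"dest\":{\"index\":\"dest-index\"}}");
+Response response = client.performRequest(request);
+----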
+ +[float] +=== Enhancements + +Allocation:: +* Fix cluster.routing.allocation.enable and cluster.routing.rebalance.enable case {pull}28037[#28037] (issue: {issue}28007[#28007]) +* Add node id to shard failure message {pull}28024[#28024] (issue: {issue}28018[#28018]) + +Analysis:: +* Limit the analyzed text for highlighting (#27934) {pull}28176[#28176] (issue: {issue}27517[#27517]) +* Allow TrimFilter to be used in custom normalizers {pull}27758[#27758] (issue: {issue}27310[#27310]) + +Circuit Breakers:: +* Add accounting circuit breaker and track segment memory usage {pull}27116[#27116] (issue: {issue}27044[#27044]) + +Cluster:: +* Adds wait_for_no_initializing_shards to cluster health API {pull}27489[#27489] (issue: {issue}25623[#25623]) + +Core:: +* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28190[#28190] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) +* Introduce elasticsearch-core jar {pull}28178[#28178] (issue: {issue}27933[#27933]) +* Add Writeable.Reader support to TransportResponseHandler {pull}28010[#28010] (issue: {issue}26315[#26315]) +* Simplify rejected execution exception {pull}27664[#27664] (issue: {issue}27663[#27663]) +* Add node name to thread pool executor name {pull}27663[#27663] (issues: {issue}26007[#26007], {issue}26835[#26835]) + +Discovery:: +* Add information when master node left to DiscoveryNodes' shortSummary() {pull}28197[#28197] (issue: {issue}28169[#28169]) + +Engine:: +* Move uid lock into LiveVersionMap {pull}27905[#27905] +* Optimize version map for append-only indexing {pull}27752[#27752] + +Geo:: +* [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder {pull}27692[#27692] (issues: {issue}27690[#27690], {issue}9120[#9120]) +* [Geo] Add Well Known Text (WKT) Parsing Support to ShapeBuilders {pull}27417[#27417] (issue: {issue}9120[#9120]) + +Highlighting:: +* Include all sentences smaller than fragment_size in the unified highlighter {pull}28132[#28132] (issue: {issue}28089[#28089]) + +Ingest:: +* Enable convert processor to support Long and Double {pull}27891[#27891] (issues: {issue}23085[#23085], {issue}23423[#23423]) + +Internal:: +* Make KeyedLock reentrant {pull}27920[#27920] +* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865]) +* Tighten the CountedBitSet class {pull}27632[#27632] +* Avoid doing redundant work when checking for self references. {pull}26927[#26927] (issue: {issue}26907[#26907]) + +Java API:: +* Add missing delegate methods to NodeIndicesStats {pull}28092[#28092] +* Java api clean-up : consistency for `shards_acknowledged` getters {pull}27819[#27819] (issue: {issue}27784[#27784]) + +Java High Level REST Client:: +* add toString implementation for UpdateRequest. {pull}27997[#27997] (issue: {issue}27986[#27986]) +* Add Close Index API to the high level REST client {pull}27734[#27734] (issue: {issue}27205[#27205]) +* Add Open Index API to the high level REST client {pull}27574[#27574] (issue: {issue}27205[#27205]) +* Added Create Index support to high-level REST client {pull}27351[#27351] (issue: {issue}27205[#27205]) +* Add multi get api to the high level rest client {pull}27337[#27337] (issue: {issue}27205[#27205]) +* Add msearch api to high level client {pull}27274[#27274] + +Machine Learning:: +* Increased tokenization flexibility for categorization. 
Now all {es} analyzer +functionality is available, which opens up the possibility of sensibly +categorizing non-English log messages. For more information, see {stack-ov}/ml-configuring-categories.html#ml-configuring-analyzer[Customizing the Categorization Analyzer]. +* Improved the sensitivity of the analysis to high variance data with lots of +values near zero. +* Improved the decay rate of the model memory by using a weighted moving average. +* Machine learning indices created after upgrading to 6.2 have the +`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. +As a result, {ml} indices created after upgrading to 6.2 can have a green +status on single node clusters. There is no impact in multi-node clusters. +* Changed the credentials that are used by {dfeeds}. When {security} is enabled, +a {dfeed} stores the roles of the user who created or updated the {dfeed} +**at that time**. This means that if those roles are updated, the {dfeed} +subsequently runs with the new permissions that are associated with the roles. +However, if the user's roles are adjusted after creating or updating the {dfeed} +then the {dfeed} continues to run with the permissions that are associated with +the original roles. For more information, see +{stack-ov}/ml-dfeeds.html[Datafeeds]. +* Added a new `scheduled` forecast status, which indicates that the forecast +has not started yet. + +Mapping:: +* Allow `_doc` as a type. {pull}27816[#27816] (issues: {issue}27750[#27750], {issue}27751[#27751]) + +Monitoring:: +* {monitoring} indices (`.monitoring`) created after upgrading to 6.2 have the +`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. +As a result, monitoring indices created after upgrading to 6.2 can have a green +status on single node clusters. There is no impact in multi-node clusters. +* Added a cluster alert that triggers whenever a node is added, removed, or +restarted. + +Network:: +* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) +* Add read timeouts to http module {pull}27713[#27713] +* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) +* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) +* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) + +Packaging:: +* Extend JVM options to support multiple versions {pull}27675[#27675] (issue: {issue}27646[#27646]) +* Add explicit coreutils dependency {pull}27660[#27660] (issue: {issue}27609[#27609]) +* Detect mktemp from coreutils {pull}27659[#27659] (issues: {issue}27609[#27609], {issue}27643[#27643]) +* Enable GC logs by default {pull}27610[#27610] +* Use private directory for temporary files {pull}27609[#27609] (issues: {issue}14372[#14372], {issue}27144[#27144]) + +Percolator:: +* also extract match_all queries when indexing percolator queries {pull}27585[#27585] + +Plugin Lang Painless:: +* Painless: Add whitelist extensions {pull}28161[#28161] +* Painless: Modify Loader to Load Classes Directly from Definition {pull}28088[#28088] +* Clean Up Painless Cast Object {pull}27794[#27794] +* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264] + +Plugins:: +* Add client actions to action plugin {pull}28280[#28280] (issue: {issue}27759[#27759]) +* Plugins: Add validation to plugin descriptor parsing {pull}27951[#27951] +* Plugins: Add plugin extension capabilities {pull}27881[#27881] +* Add support for filtering mappings fields {pull}27603[#27603] + +Rank Evaluation:: +* Simplify RankEvalResponse output {pull}28266[#28266] + +Recovery:: +* Truncate tlog cli should assign global checkpoint {pull}28192[#28192] (issue: {issue}28181[#28181]) +* Replica starts peer recovery with safe commit {pull}28181[#28181] (issue: {issue}10708[#10708]) +* Primary send safe commit in file-based recovery {pull}28038[#28038] (issue: {issue}10708[#10708]) +* Fail resync-failed shards in subsequent writes {pull}28005[#28005] +* Introduce promoting index shard state {pull}28004[#28004] (issue: {issue}24841[#24841]) +* Non-peer recovery should set the global checkpoint {pull}27965[#27965] +* Persist global checkpoint when finalizing a peer recovery {pull}27947[#27947] (issue: {issue}27861[#27861]) +* Rollback a primary before recovering from translog {pull}27804[#27804] (issue: {issue}10708[#10708]) + +Search:: +* Use typeName() to check field type in GeoShapeQueryBuilder {pull}27730[#27730] +* Optimize search_after when sorting in index sort order {pull}26401[#26401] + +Security:: +* Added the ability to refresh tokens that were created by the token API. The +API provides information about a refresh token, which you can use within 24 +hours of its creation to extend the life of a token. For more information, see +<>. +* Added principal and role information to `access_granted`, `access_denied`, +`run_as_granted`, and `run_as_denied` audit events. For more information about +these events, see {stack-ov}/auditing.html[Auditing Security Events]. +* Added audit event ignore policies, which are a way to tune the verbosity of an +audit trail. These policies define rules for ignoring audit events that match +specific attribute values. For more information, see +{stack-ov}/audit-log-output.html#audit-log-ignore-policy[Logfile Audit Events Ignore Policies]. +* Added a certificates API, which enables you to retrieve information about the +X.509 certificates that are used to encrypt communications in your {es} cluster. +For more information, see <>. 
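+
+A hedged sketch of the token refresh flow described in the first Security item
+above. The endpoint follows the 6.x `_xpack/security` layout, the token value
+is a placeholder, and `client` is a low-level RestClient as in the earlier
+sketches.
+
+[source,java]
+----
+import java.util.Collections;
+import org.apache.http.entity.ContentType;
+import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.Response;
+
+// Sketch: exchange a refresh token for a new access token within 24 hours
+// of its creation. The response carries a new access_token/refresh_token pair.
+String body = "{\"grant_type\":\"refresh_token\",\"refresh_token\":\"REFRESH_TOKEN_VALUE\"}";
+Response response = client.performRequest("POST", "/_xpack/security/oauth2/token",
+    Collections.emptyMap(), new NStringEntity(body, ContentType.APPLICATION_JSON));
+----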
+ +Sequence IDs:: +* Do not keep 5.x commits when having 6.x commits {pull}28188[#28188] (issues: {issue}27606[#27606], {issue}28038[#28038]) +* Use lastSyncedGlobalCheckpoint in deletion policy {pull}27826[#27826] (issue: {issue}27606[#27606]) +* Use CountedBitSet in LocalCheckpointTracker {pull}27793[#27793] +* Only fsync global checkpoint if needed {pull}27652[#27652] +* Keep commits and translog up to the global checkpoint {pull}27606[#27606] +* Adjust CombinedDeletionPolicy for multiple commits {pull}27456[#27456] (issues: {issue}10708[#10708], {issue}27367[#27367]) +* Keeps index commits up to the current global checkpoint {pull}27367[#27367] (issue: {issue}10708[#10708]) +* Dedup translog operations by reading in reverse {pull}27268[#27268] (issue: {issue}10708[#10708]) + +Settings:: +* Add validation of keystore setting names {pull}27626[#27626] + +Snapshot/Restore:: +* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723] +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] +* Include include_global_state in Snapshot status API (#22423) {pull}26853[#26853] (issue: {issue}22423[#22423]) + +Task Manager:: +* Add ability to associate an ID with tasks {pull}27764[#27764] (issue: {issue}23250[#23250]) + +Translog:: +* Simplify MultiSnapshot#SeqNoset {pull}27547[#27547] (issue: {issue}27268[#27268]) +* Enclose CombinedDeletionPolicy in SnapshotDeletionPolicy {pull}27528[#27528] (issues: {issue}27367[#27367], {issue}27456[#27456]) + +Watcher:: +* Added the ability to set the `index` and `doc_type` dynamically in an index +action. For more information, see {stack-ov}/actions-index.html[Index Action]. +* Added a `refresh` index action attribute, which enables you to set the +refresh policy of the write request. For more information, see +{stack-ov}/actions-index.html[Index Action]. +* Added support for actions in slack attachments, which enables you to add +buttons that can be clicked in slack messages. For more information, see +{stack-ov}/actions-slack.html[Slack Action]. +* {watcher} indices (`.watch*` and `triggered_watches`) created after upgrading +to 6.2 have the `auto_expand_replicas: 0-1` setting rather than a fixed setting +of 1 replica. As a result, {watcher} indices created after upgrading to 6.2 can +have a green status on single node clusters. There is no impact in multi-node +clusters. + +[float] +=== Bug Fixes + +Aggregations:: +* Adds metadata to rewritten aggregations {pull}28185[#28185] (issue: {issue}28170[#28170]) +* Fix NPE on composite aggregation with sub-aggregations that need scores {pull}28129[#28129] +* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) +* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912]) +* Fix global aggregation that requires breadth first and scores {pull}27942[#27942] (issues: {issue}22321[#22321], {issue}27928[#27928]) +* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936] +* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841]) +* Using DocValueFormat::parseBytesRef for parsing missing value parameter {pull}27855[#27855] (issue: {issue}27788[#27788]) +* Fix illegal cast of the "low cardinality" optimization of the `terms` aggregation. {pull}27543[#27543] +* Always include the _index and _id for nested search hits. 
{pull}27201[#27201] (issue: {issue}27053[#27053]) + +Allocation:: +* Do not open indices with broken settings {pull}26995[#26995] + +Core:: +* Fix lock accounting in releasable lock {pull}28202[#28202] +* Fixes ByteSizeValue to serialise correctly {pull}27702[#27702] (issue: {issue}27568[#27568]) +* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) +* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] +* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) + +Engine:: +* Replica recovery could go into an endless flushing loop {pull}28350[#28350] +* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852]) +* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852]) +* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) + +Geo:: +* Correct two equality checks on incomparable types {pull}27688[#27688] +* Handle case where the hole vertex is south of the containing polygon(s) {pull}27685[#27685] (issue: {issue}25933[#25933]) + +Highlighting:: +* Fix highlighting on a keyword field that defines a normalizer {pull}27604[#27604] + +Inner Hits:: +* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) + +Internal:: +* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) +* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) +* Retain originalIndex info when rewriting FieldCapabilities requests {pull}27761[#27761] + +Java REST Client:: +* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) + +Machine Learning:: +* Improved error reporting for crashes and resource problems on Linux. +* Improved the detection of seasonal trends in bucket spans longer than 1 hour. +* Updated the forecast API to wait for validation and return an error if the +validation fails. +* Set the actual bucket value to 0 in model plots for empty buckets for count +and sum functions. The count and sum functions treat empty buckets as 0 rather +than unknown for anomaly detection, so it was inconsistent not to do the same +for model plots. This inconsistency resulted in problems plotting these buckets +in {kib}. + +Mapping:: +* Ignore null value for range field (#27845) {pull}28116[#28116] (issue: {issue}27845[#27845]) +* Pass `java.locale.providers=COMPAT` to Java 9 onwards {pull}28080[#28080] (issue: {issue}10984[#10984]) +* Allow update of `eager_global_ordinals` on `_parent`. 
{pull}28014[#28014] (issue: {issue}24407[#24407]) +* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) + +Network:: +* Only bind loopback addresses when binding to local {pull}28029[#28029] (issue: {issue}1877[#1877]) +* Remove potential nio selector leak {pull}27825[#27825] +* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) +* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) +* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) + +Packaging:: +* Allow custom service names when installing on windows {pull}25255[#25255] (issue: {issue}25231[#25231]) + +Percolator:: +* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1]) + +Plugin Analysis ICU:: +* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] + +Plugin Analysis Phonetic:: +* Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter {pull}28225[#28225] (issue: {issue}28211[#28211]) + +Plugin Lang Painless:: +* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) +* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210]) + +Plugin Repository HDFS:: +* Fix SecurityException when HDFS Repository used against HA Namenodes {pull}27196[#27196] + +Plugins:: +* Make sure that we don't detect files as maven coordinate when installing a plugin {pull}28163[#28163] +* Fix upgrading indices which use a custom similarity plugin. {pull}26985[#26985] (issue: {issue}25350[#25350]) + +Recovery:: +* Open engine should keep only starting commit {pull}28228[#28228] (issues: {issue}27804[#27804], {issue}28181[#28181]) +* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061]) +* Set global checkpoint before open engine from store {pull}27972[#27972] (issues: {issue}27965[#27965], {issue}27970[#27970]) +* Check and repair index under the store metadata lock {pull}27768[#27768] (issues: {issue}24481[#24481], {issue}24787[#24787], {issue}27731[#27731]) +* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) + +Rollover:: +* Make index rollover action atomic {pull}28039[#28039] (issue: {issue}26976[#26976]) + +Scripting:: +* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] + +Scroll:: +* Reject scroll query if size is 0 (#22552) {pull}27842[#27842] (issue: {issue}22552[#22552]) +* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] + +Search:: +* Fix simple_query_string on invalid input {pull}28219[#28219] (issue: {issue}28204[#28204]) +* Use the underlying connection version for CCS connections {pull}28093[#28093] +* Fix synonym phrase query expansion for cross_fields parsing {pull}28045[#28045] +* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979]) +* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) + +Security:: +* Updated the `setup-passwords` command to generate passwords with characters +`A-Z`, `a-z`, and `0-9`, so that they are safe to use in shell scripts. For more +information about this command, see <>. +* Improved the error messages that occur if the `x-pack` directory is missing +when you run <>. 
+* Fixed the ordering of realms in a realm chain, which determines the order in +which the realms are consulted. For more information, see +{stack-ov}/realms.html[Realms]. + +Sequence IDs:: +* Recovery from snapshot may leave seq# gaps {pull}27850[#27850] +* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591]) +* Obey translog durability in global checkpoint sync {pull}27641[#27641] + +Settings:: +* Settings: Introduce settings updater for a list of settings {pull}28338[#28338] (issue: {issue}28047[#28047]) +* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) +* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) +* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) + +Snapshot/Restore:: +* Consistent updates of IndexShardSnapshotStatus {pull}28130[#28130] (issue: {issue}26480[#26480]) +* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) +* Do not start snapshots that are deleted during initialization {pull}27931[#27931] +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Consistent update of stage and failure message in IndexShardSnapshotStatus {pull}27557[#27557] (issue: {issue}26480[#26480]) +* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865]) +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) + +Stats:: +* Fixes DocStats to properly deal with shards that report -1 index size {pull}27863[#27863] +* Include internal refreshes in refresh stats {pull}27615[#27615] + +Term Vectors:: +* Fix term vectors generator with keyword and normalizer {pull}27608[#27608] (issue: {issue}27320[#27320]) + +Watcher:: +* Replaced group settings with affix key settings where filters are needed. +For more information, see https://github.com/elastic/elasticsearch/pull/28338. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Dependencies: Update joda time to 2.9.9 {pull}28261[#28261] +* upgrade to lucene 7.2.1 {pull}28218[#28218] (issue: {issue}28044[#28044]) +* Upgrade jna from 4.4.0-1 to 4.5.1 {pull}28183[#28183] (issue: {issue}28172[#28172]) + +Ingest:: +* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc new file mode 100644 index 0000000000000..238ea86a93140 --- /dev/null +++ b/docs/reference/release-notes/6.3.asciidoc @@ -0,0 +1,113 @@ +//// +// To add a release, copy and paste the following text, uncomment the relevant +// sections, and add a link to the new section in the list of releases at the +// top of the page. Note that release subheads must be floated and sections +// cannot be empty. 
+// TEMPLATE
+
+// [[release-notes-n.n.n]]
+// == {es} n.n.n
+
+//[float]
+[[breaking-n.n.n]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+////
+
+[[release-notes-6.3.1]]
+== Elasticsearch version 6.3.1
+
+coming[6.3.1]
+
+//[float]
+[[breaking-6.3.1]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180])
+
+Respect accept header on requests with no handler ({pull}30383[#30383])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.3.0]]
+== {es} version 6.3.0
+
+coming[6.3.0]
+
+[float]
+[[breaking-6.3.0]]
+=== Breaking Changes
+
+[float]
+=== Deprecations
+Monitoring::
+* By default when you install {xpack}, monitoring is enabled but data collection
+is disabled. To enable data collection, use the new
+`xpack.monitoring.collection.enabled` setting. You can update this setting by
+using the <>. For more
+information, see <>.
+
+Security::
+* The legacy `XPackExtension` extension mechanism has been removed and replaced
+with an SPI-based extension mechanism that is installed and built as an
+Elasticsearch plugin.
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
diff --git a/docs/reference/release-notes/6.4.asciidoc b/docs/reference/release-notes/6.4.asciidoc
new file mode 100644
index 0000000000000..c2266f53e2af9
--- /dev/null
+++ b/docs/reference/release-notes/6.4.asciidoc
@@ -0,0 +1,89 @@
+////
+// To add a release, copy and paste the following text, uncomment the relevant
+// sections, and add a link to the new section in the list of releases at the
+// top of the page. Note that release subheads must be floated and sections
+// cannot be empty.
+// TEMPLATE
+
+// [[release-notes-n.n.n]]
+// == {es} n.n.n
+
+//[float]
+[[breaking-n.n.n]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+////
+
+[[release-notes-6.4.0]]
+== {es} version 6.4.0
+
+coming[6.4.0]
+
+//[float]
+//[[breaking-6.4.0]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+[float]
+=== New Features
+
+The new <> field makes it possible to know which fields
+got ignored at index time because of the <>
+option. ({pull}30140[#29658])
+
+A new analysis plugin called `analysis_nori` exposes the Lucene Korean
+analysis module.
({pull}30397[#30397]) + +[float] +=== Enhancements + +{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255]) + +Geo:: +* Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376]) + +Rollup:: +* Validate timezone in range queries to ensure they match the selected job when +searching ({pull}30338[#30338]) + +[float] +=== Bug Fixes + +Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) + +Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) + +Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) + +Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues From 28011ef921549b00248c43c342f69dfdaef9ac42 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 8 Jun 2018 12:50:12 -0700 Subject: [PATCH 17/24] [DOCS] Removes 6.3.1 release notes --- docs/reference/release-notes.asciidoc | 1 - docs/reference/release-notes/6.3.asciidoc | 42 ++++------------------- 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 361bf01ffbd45..75018e3b58037 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,7 +6,6 @@ This section summarizes the changes in each release. * <> -* <> * <> * <> * <> diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc index 238ea86a93140..6e1152bcfea3f 100644 --- a/docs/reference/release-notes/6.3.asciidoc +++ b/docs/reference/release-notes/6.3.asciidoc @@ -34,40 +34,6 @@ //=== Known Issues //// -[[release-notes-6.3.1]] -== Elasticsearch version 6.3.1 - -coming[6.3.1] - -//[float] -[[breaking-6.3.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) - -Respect accept header on requests with no handler ({pull}30383[#30383]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - [[release-notes-6.3.0]] == {es} version 6.3.0 @@ -103,8 +69,12 @@ elasticsearch plugin. //[float] //=== Enhancements -//[float] -//=== Bug Fixes +[float] +=== Bug Fixes + +Security:: +* Reduces the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) +* Respects accept header on requests with no handler ({pull}30383[#30383]) //[float] //=== Regressions From 49ea4355daa55fae5427e7af52157ef291d3f353 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 8 Jun 2018 13:53:35 -0700 Subject: [PATCH 18/24] Remove DocumentFieldMappers#simpleMatchToFullName. (#31041) * Remove DocumentFieldMappers#simpleMatchToFullName, as it is duplicative of MapperService#simpleMatchToIndexNames. * Rename MapperService#simpleMatchToIndexNames -> simpleMatchToFullName for consistency. * Simplify EsIntegTestCase#assertConcreteMappingsOnAll to accept concrete fields instead of wildcard patterns. 
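As a rough sketch of how call sites read after this rename (the wrapper class and the example pattern in the comment are invented for illustration; only `MapperService#simpleMatchToFullName` itself comes from the patch):

    import java.util.Collection;

    import org.elasticsearch.index.mapper.MapperService;

    final class FieldPatternExpansion {
        /**
         * Expands a (possibly wildcarded) field pattern such as "user.*" into
         * the concrete full field names known to the mappings. A pattern
         * without wildcards is returned as a singleton list unchanged, so
         * callers need no special casing.
         */
        static Collection<String> expand(MapperService mapperService, String pattern) {
            return mapperService.simpleMatchToFullName(pattern);
        }
    }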
(cherry picked from commit 00b0e1006320cf8d11d2aaf2f4233dab1bcc1be3) --- .../TransportFieldCapabilitiesIndexAction.java | 2 +- .../index/mapper/DocumentFieldMappers.java | 13 ------------- .../elasticsearch/index/mapper/MapperService.java | 2 +- .../index/query/QueryShardContext.java | 2 +- .../index/termvectors/TermVectorsService.java | 2 +- .../fetch/subphase/highlight/HighlightPhase.java | 3 +-- .../index/mapper/FieldNamesFieldTypeTests.java | 2 +- .../index/mapper/ParentFieldMapperTests.java | 2 +- .../org/elasticsearch/test/ESIntegTestCase.java | 12 ++++++------ ...ecurityIndexSearcherWrapperIntegrationTests.java | 2 +- 10 files changed, 14 insertions(+), 28 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index b24dc685df6d0..f1a1dc451406e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -76,7 +76,7 @@ protected FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesI MapperService mapperService = indicesService.indexServiceSafe(shardId.getIndex()).mapperService(); Set fieldNames = new HashSet<>(); for (String field : request.fields()) { - fieldNames.addAll(mapperService.simpleMatchToIndexNames(field)); + fieldNames.addAll(mapperService.simpleMatchToFullName(field)); } Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); Map responseMap = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java index ea242aca68f44..9193ca209ba23 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -20,16 +20,13 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.analysis.FieldNameAnalyzer; import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.Map; -import java.util.Set; public final class DocumentFieldMappers implements Iterable { @@ -70,16 +67,6 @@ public FieldMapper getMapper(String field) { return fieldMappers.get(field); } - public Collection simpleMatchToFullName(String pattern) { - Set fields = new HashSet<>(); - for (FieldMapper fieldMapper : this) { - if (Regex.simpleMatch(pattern, fieldMapper.fieldType().name())) { - fields.add(fieldMapper.fieldType().name()); - } - } - return fields; - } - /** * A smart analyzer used for indexing that takes into account specific analyzers configured * per {@link FieldMapper}. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e6ba9e8f39025..eaadff997de79 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -789,7 +789,7 @@ public MappedFieldType fullName(String fullName) { * Returns all the fields that match the given pattern. 
If the pattern is prefixed with a type * then the fields will be returned with a type prefix. */ - public Collection simpleMatchToIndexNames(String pattern) { + public Collection simpleMatchToFullName(String pattern) { if (Regex.isSimpleMatchPattern(pattern) == false) { // no wildcards return Collections.singletonList(pattern); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 32a1f64d37b33..c7bea4e8b1ff3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -197,7 +197,7 @@ public void setIsFilter(boolean isFilter) { * type then the fields will be returned with a type prefix. */ public Collection simpleMatchToIndexNames(String pattern) { - return mapperService.simpleMatchToIndexNames(pattern); + return mapperService.simpleMatchToFullName(pattern); } public MappedFieldType fieldMapper(String name) { diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 573e75d78060a..6d60e2ab70b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -158,7 +158,7 @@ else if (docIdAndVersion != null) { private static void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) { Set fieldNames = new HashSet<>(); for (String pattern : request.selectedFields()) { - fieldNames.addAll(indexShard.mapperService().simpleMatchToIndexNames(pattern)); + fieldNames.addAll(indexShard.mapperService().simpleMatchToFullName(pattern)); } request.selectedFields(fieldNames.toArray(Strings.EMPTY_ARRAY)); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java index 16cc6a50e8c7e..4343a1ebca564 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java @@ -53,8 +53,7 @@ public void hitExecute(SearchContext context, HitContext hitContext) { for (SearchContextHighlight.Field field : context.highlight().fields()) { Collection fieldNamesToHighlight; if (Regex.isSimpleMatchPattern(field.field())) { - DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().getType()); - fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field()); + fieldNamesToHighlight = context.mapperService().simpleMatchToFullName(field.field()); } else { fieldNamesToHighlight = Collections.singletonList(field.field()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java index 945407fc39492..4ae03138764d7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java @@ -63,7 +63,7 @@ public void testTermQuery() { MapperService mapperService = mock(MapperService.class); when(mapperService.fullName("_field_names")).thenReturn(fieldNamesFieldType); 
when(mapperService.fullName("field_name")).thenReturn(fieldType); - when(mapperService.simpleMatchToIndexNames("field_name")).thenReturn(Collections.singletonList("field_name")); + when(mapperService.simpleMatchToFullName("field_name")).thenReturn(Collections.singletonList("field_name")); QueryShardContext queryShardContext = new QueryShardContext(0, indexSettings, null, null, mapperService, null, null, null, null, null, null, () -> 0L, null); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index 23af6e749f506..3f29844ad0fce 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -124,7 +124,7 @@ public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception { .endObject() .endObject().endObject(); mapperService.merge("some_type", new CompressedXContent(Strings.toString(mappingSource)), MergeReason.MAPPING_UPDATE, false); - Set allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*")); + Set allFields = new HashSet<>(mapperService.simpleMatchToFullName("*")); assertTrue(allFields.contains("_parent")); assertFalse(allFields.contains("_parent#null")); MappedFieldType fieldType = mapperService.fullName("_parent"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index cd2735a0f96b5..e63c93f9e10c0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -126,7 +126,8 @@ import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Segment; -import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MockFieldFilterPlugin; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -823,7 +824,7 @@ public void waitNoPendingTasksOnAll() throws Exception { } /** - * Waits till a (pattern) field name mappings concretely exists on all nodes. Note, this waits for the current + * Waits until mappings for the provided fields exist on all nodes. Note, this waits for the current * started shards and checks for concrete mappings. */ public void assertConcreteMappingsOnAll(final String index, final String type, final String... 
fieldNames) throws Exception {
@@ -833,11 +834,10 @@ public void assertConcreteMappingsOnAll(final String index, final String type, f
             IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
             IndexService indexService = indicesService.indexService(resolveIndex(index));
             assertThat("index service doesn't exists on " + node, indexService, notNullValue());
-            DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
-            assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue());
+            MapperService mapperService = indexService.mapperService();
             for (String fieldName : fieldNames) {
-                Collection<String> matches = documentMapper.mappers().simpleMatchToFullName(fieldName);
-                assertThat("field " + fieldName + " doesn't exists on " + node, matches, Matchers.not(emptyIterable()));
+                MappedFieldType fieldType = mapperService.fullName(fieldName);
+                assertNotNull("field " + fieldName + " doesn't exist on " + node, fieldType);
             }
         }
         assertMappingOnMaster(index, type, fieldNames);
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java
index 0158a556fa051..01f8f5ccdeff6 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java
@@ -60,7 +60,7 @@ public void testDLS() throws Exception {
         MapperService mapperService = mock(MapperService.class);
         ScriptService scriptService = mock(ScriptService.class);
         when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList());
-        when(mapperService.simpleMatchToIndexNames(anyString()))
+        when(mapperService.simpleMatchToFullName(anyString()))
             .then(invocationOnMock -> Collections.singletonList((String) invocationOnMock.getArguments()[0]));
         ThreadContext threadContext = new ThreadContext(Settings.EMPTY);

From 62bf33369c3d4ee46045fa3f93d20d8c905ad382 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Fri, 8 Jun 2018 21:19:16 -0400
Subject: [PATCH 19/24] Remove version from license file name for GCS SDK
 (#31221)

For most of our dependencies, the license filename is deduced by stripping
the version off the artifact name. However, the version on the GCS SDK
(google-api-services-storage) does not match the usual format and instead
starts with a "v". This meant that the license filename for this dependency
ended up carrying the version, which it should not. This commit adjusts the
regex that deduces the license filename to account for this case, and
renames the google-api-services-storage license files accordingly.
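To make the regex change in the diff below concrete, here is a self-contained plain-Java sketch of the filename rule. The build task itself uses the Groovy idiom `jarName - ~/\-v?\d+.*/`, which strips the first match of the pattern; the class and method names in this sketch are invented:

    public class LicenseNameDemo {
        static String depName(String jarName) {
            // Plain-Java equivalent of the adjusted Groovy pattern.
            return jarName.replaceFirst("-v?\\d+.*", "");
        }

        public static void main(String[] args) {
            // "joda-time-2.9.9.jar" -> "joda-time" (old and new regex agree)
            // "google-api-services-storage-v1-rev115.jar" -> "google-api-services-storage"
            //   (the old pattern "-\d+.*" never matched this name, so the
            //    version leaked into the license filename)
            for (String jar : new String[] {"joda-time-2.9.9.jar", "google-api-services-storage-v1-rev115.jar"}) {
                System.out.println(jar + " -> " + depName(jar));
            }
        }
    }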
--- .../gradle/precommit/DependencyLicensesTask.groovy | 2 +-
 ...v115-LICENSE.txt => google-api-services-storage-LICENSE.txt} | 0
 ...rev115-NOTICE.txt => google-api-services-storage-NOTICE.txt} | 0
 3 files changed, 1 insertion(+), 1 deletion(-)
 rename plugins/repository-gcs/licenses/{google-api-services-storage-v1-rev115-LICENSE.txt => google-api-services-storage-LICENSE.txt} (100%)
 rename plugins/repository-gcs/licenses/{google-api-services-storage-v1-rev115-NOTICE.txt => google-api-services-storage-NOTICE.txt} (100%)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
index 4d292d87ec39c..df30326a59d79 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
@@ -151,7 +151,7 @@ public class DependencyLicensesTask extends DefaultTask {
         for (File dependency : dependencies) {
             String jarName = dependency.getName()
-            String depName = jarName - ~/\-\d+.*/
+            String depName = jarName - ~/\-v?\d+.*/
             if (ignoreShas.contains(depName)) {
                 // local deps should not have sha files!
                 if (getShaFile(jarName).exists()) {
diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-LICENSE.txt
similarity index 100%
rename from plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt
rename to plugins/repository-gcs/licenses/google-api-services-storage-LICENSE.txt
diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-NOTICE.txt
similarity index 100%
rename from plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt
rename to plugins/repository-gcs/licenses/google-api-services-storage-NOTICE.txt

From ea223c365d4033297a9717db6bb92f642f1c32fa Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Fri, 8 Jun 2018 17:19:41 -0600
Subject: [PATCH 20/24] Fully encapsulate LocalCheckpointTracker inside of the
 engine (#31213)

* Fully encapsulate LocalCheckpointTracker inside of the engine

With this change, the Engine interface no longer exposes the
`LocalCheckpointTracker`; instead it exposes the pieces callers need
(such as retrieving the local checkpoint) as individual methods.
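A minimal sketch of how sequence-number state is read after this change; only the three `Engine` methods used here come from the patch, while the wrapper class is illustrative and assumes the caller supplies the engine and the current global checkpoint:

    import org.elasticsearch.index.engine.Engine;
    import org.elasticsearch.index.seqno.SeqNoStats;

    final class CheckpointAccess {
        /**
         * Blocks until all operations up to seqNo have completed, then reads
         * the sequence-number state through the new Engine-level accessors
         * instead of reaching into the (now hidden) LocalCheckpointTracker.
         */
        static SeqNoStats awaitAndSnapshot(Engine engine, long seqNo, long globalCheckpoint)
                throws InterruptedException {
            engine.waitForOpsToComplete(seqNo);
            assert engine.getLocalCheckpoint() >= seqNo;
            return engine.getSeqNoStats(globalCheckpoint);
        }
    }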
--- .../elasticsearch/index/engine/Engine.java | 25 +++- .../index/engine/InternalEngine.java | 24 +++- .../elasticsearch/index/shard/IndexShard.java | 22 ++-- .../index/shard/LocalShardSnapshot.java | 2 +- .../cluster/routing/PrimaryAllocationIT.java | 3 +- .../index/engine/InternalEngineTests.java | 117 +++++++++--------- .../index/shard/IndexShardTests.java | 3 +- .../SharedClusterSnapshotRestoreIT.java | 3 +- .../index/engine/EngineTestCase.java | 9 ++ .../index/shard/IndexShardTestCase.java | 2 +- 10 files changed, 129 insertions(+), 81 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 2c91ef36bc764..2dc39f093a5ab 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -62,7 +62,7 @@ import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -635,11 +635,28 @@ public CommitStats commitStats() { } /** - * The sequence number service for this engine. + * @return the local checkpoint for this Engine + */ + public abstract long getLocalCheckpoint(); + + /** + * Waits for all operations up to the provided sequence number to complete. * - * @return the sequence number service + * @param seqNo the sequence number that the checkpoint must advance to before this method returns + * @throws InterruptedException if the thread was interrupted while blocking on the condition + */ + public abstract void waitForOpsToComplete(long seqNo) throws InterruptedException; + + /** + * Reset the local checkpoint in the tracker to the given local checkpoint + * @param localCheckpoint the new checkpoint to be set + */ + public abstract void resetLocalCheckpoint(long localCheckpoint); + + /** + * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint */ - public abstract LocalCheckpointTracker getLocalCheckpointTracker(); + public abstract SeqNoStats getSeqNoStats(long globalCheckpoint); /** * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint) diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 897a41eddec46..9b2a109a0512b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -67,6 +67,7 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; import org.elasticsearch.index.shard.ShardId; @@ -2234,10 +2235,31 @@ public MergeStats getMergeStats() { return mergeScheduler.stats(); } - public final LocalCheckpointTracker getLocalCheckpointTracker() { + // Used only for testing! 
Package private to prevent anyone else from using it + LocalCheckpointTracker getLocalCheckpointTracker() { return localCheckpointTracker; } + @Override + public long getLocalCheckpoint() { + return localCheckpointTracker.getCheckpoint(); + } + + @Override + public void waitForOpsToComplete(long seqNo) throws InterruptedException { + localCheckpointTracker.waitForOpsToComplete(seqNo); + } + + @Override + public void resetLocalCheckpoint(long localCheckpoint) { + localCheckpointTracker.resetCheckpoint(localCheckpoint); + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return localCheckpointTracker.getStats(globalCheckpoint); + } + /** * Returns the number of times a version was looked up either from the index. * Note this is only available if assertions are enabled diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 9103a9dd27cad..a03def2cd572b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -407,7 +407,7 @@ public void updateShardState(final ShardRouting newRouting, final Engine engine = getEngine(); if (currentRouting.isRelocationTarget() == false || recoverySourceNode.getVersion().before(Version.V_6_0_0_alpha1)) { // there was no primary context hand-off in < 6.0.0, need to manually activate the shard - replicationTracker.activatePrimaryMode(getEngine().getLocalCheckpointTracker().getCheckpoint()); + replicationTracker.activatePrimaryMode(getEngine().getLocalCheckpoint()); } if (currentRouting.isRelocationTarget() == true && recoverySourceNode.getVersion().before(Version.V_6_0_0_alpha1)) { // Flush the translog as it may contain operations with no sequence numbers. We want to make sure those @@ -497,8 +497,7 @@ public void updateShardState(final ShardRouting newRouting, */ engine.rollTranslogGeneration(); engine.fillSeqNoGaps(newPrimaryTerm); - replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), - getEngine().getLocalCheckpointTracker().getCheckpoint()); + replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint()); primaryReplicaSyncer.accept(this, new ActionListener() { @Override public void onResponse(ResyncTask resyncTask) { @@ -524,7 +523,7 @@ public void onFailure(Exception e) { } }, e -> failShard("exception during primary term transition", e)); - replicationTracker.activatePrimaryMode(getEngine().getLocalCheckpointTracker().getCheckpoint()); + replicationTracker.activatePrimaryMode(getLocalCheckpoint()); primaryTerm = newPrimaryTerm; } } @@ -905,7 +904,7 @@ public CommitStats commitStats() { @Nullable public SeqNoStats seqNoStats() { Engine engine = getEngineOrNull(); - return engine == null ? null : engine.getLocalCheckpointTracker().getStats(replicationTracker.getGlobalCheckpoint()); + return engine == null ? null : engine.getSeqNoStats(replicationTracker.getGlobalCheckpoint()); } public IndexingStats indexingStats(String... 
types) { @@ -1742,7 +1741,7 @@ public void updateGlobalCheckpointForShard(final String allocationId, final long * @throws InterruptedException if the thread was interrupted while blocking on the condition */ public void waitForOpsToComplete(final long seqNo) throws InterruptedException { - getEngine().getLocalCheckpointTracker().waitForOpsToComplete(seqNo); + getEngine().waitForOpsToComplete(seqNo); } /** @@ -1775,7 +1774,7 @@ public void markAllocationIdAsInSync(final String allocationId, final long local * @return the local checkpoint */ public long getLocalCheckpoint() { - return getEngine().getLocalCheckpointTracker().getCheckpoint(); + return getEngine().getLocalCheckpoint(); } /** @@ -1816,7 +1815,7 @@ public void maybeSyncGlobalCheckpoint(final String reason) { return; } // only sync if there are not operations in flight - final SeqNoStats stats = getEngine().getLocalCheckpointTracker().getStats(replicationTracker.getGlobalCheckpoint()); + final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint()); if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint()) { final ObjectLongMap globalCheckpoints = getInSyncGlobalCheckpoints(); final String allocationId = routingEntry().allocationId().getId(); @@ -1853,7 +1852,7 @@ public ReplicationGroup getReplicationGroup() { */ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final String reason) { verifyReplicationTarget(); - final long localCheckpoint = getEngine().getLocalCheckpointTracker().getCheckpoint(); + final long localCheckpoint = getLocalCheckpoint(); if (globalCheckpoint > localCheckpoint) { /* * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global @@ -1882,8 +1881,7 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p verifyPrimary(); assert shardRouting.isRelocationTarget() : "only relocation target can update allocation IDs from primary context: " + shardRouting; assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && - getEngine().getLocalCheckpointTracker().getCheckpoint() == - primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); + getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } @@ -2269,7 +2267,7 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final operationPrimaryTerm, getLocalCheckpoint(), localCheckpoint); - getEngine().getLocalCheckpointTracker().resetCheckpoint(localCheckpoint); + getEngine().resetLocalCheckpoint(localCheckpoint); getEngine().rollTranslogGeneration(); }); globalCheckpointUpdated = true; diff --git a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index d7105c0c14d38..09391c9bc9643 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -62,7 +62,7 @@ Index getIndex() { } long maxSeqNo() { - return shard.getEngine().getLocalCheckpointTracker().getMaxSeqNo(); + return shard.getEngine().getSeqNoStats(-1).getMaxSeqNo(); } long maxUnsafeAutoIdTimestamp() { diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 407212936d1d6..90173455c3be3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; @@ -350,7 +351,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { assertThat(indexResult.getShardInfo().getSuccessful(), equalTo(numberOfReplicas + 1)); } final IndexShard oldPrimaryShard = internalCluster().getInstance(IndicesService.class, oldPrimary).getShardOrNull(shardId); - IndexShardTestCase.getEngine(oldPrimaryShard).getLocalCheckpointTracker().generateSeqNo(); // Make gap in seqno. + EngineTestCase.generateNewSeqNo(IndexShardTestCase.getEngine(oldPrimaryShard)); // Make gap in seqno. long moreDocs = scaledRandomIntBetween(1, 10); for (int i = 0; i < moreDocs; i++) { IndexResponse indexResult = index("test", "doc", Long.toString(numDocs + i)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 19ccfe99bd94a..8f1d80d22dde1 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -225,7 +225,7 @@ public void testVersionMapAfterAutoIDDocument() throws IOException { new BytesArray("{}".getBytes(Charset.defaultCharset())), null); operation = randomBoolean() ? appendOnlyPrimary(doc, false, 1) - : appendOnlyReplica(doc, false, 1, engine.getLocalCheckpointTracker().generateSeqNo()); + : appendOnlyReplica(doc, false, 1, generateNewSeqNo(engine)); engine.index(operation); assertTrue("safe access should be required", engine.isSafeAccessRequired()); assertEquals(1, engine.getVersionMapSize()); // now we add this to the map @@ -1019,7 +1019,7 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { engine.index(indexForDoc(doc)); boolean inSync = randomBoolean(); if (inSync) { - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + globalCheckpoint.set(engine.getLocalCheckpoint()); } engine.flush(); @@ -1037,7 +1037,7 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 
4L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + globalCheckpoint.set(engine.getLocalCheckpoint()); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(5L)); assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(5L)); @@ -2083,12 +2083,12 @@ public void testSeqNoAndCheckpoints() throws IOException { final Engine.DeleteResult result = initialEngine.delete(delete); if (result.getResultType() == Engine.Result.Type.SUCCESS) { assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1)); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1)); + assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1)); indexedIds.remove(id); primarySeqNo++; } else { assertThat(result.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); + assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo)); } } else { // index a document @@ -2101,12 +2101,12 @@ public void testSeqNoAndCheckpoints() throws IOException { final Engine.IndexResult result = initialEngine.index(index); if (result.getResultType() == Engine.Result.Type.SUCCESS) { assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1)); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1)); + assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1)); indexedIds.add(id); primarySeqNo++; } else { assertThat(result.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); + assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo)); } } @@ -2115,7 +2115,7 @@ public void testSeqNoAndCheckpoints() throws IOException { replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo)); } gcpTracker.updateLocalCheckpoint(primary.allocationId().getId(), - initialEngine.getLocalCheckpointTracker().getCheckpoint()); + initialEngine.getLocalCheckpoint()); gcpTracker.updateLocalCheckpoint(replica.allocationId().getId(), replicaLocalCheckpoint); if (rarely()) { @@ -2128,8 +2128,8 @@ public void testSeqNoAndCheckpoints() throws IOException { logger.info("localcheckpoint {}, global {}", replicaLocalCheckpoint, primarySeqNo); globalCheckpoint = gcpTracker.getGlobalCheckpoint(); - assertEquals(primarySeqNo, initialEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(primarySeqNo, initialEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(primarySeqNo, initialEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(primarySeqNo, initialEngine.getLocalCheckpoint()); assertThat(globalCheckpoint, equalTo(replicaLocalCheckpoint)); assertThat( @@ -2151,7 +2151,7 @@ public void testSeqNoAndCheckpoints() throws IOException { try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ recoveringEngine.recoverFromTranslog(); - assertEquals(primarySeqNo, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); + assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertThat( Long.parseLong(recoveringEngine.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), 
equalTo(primarySeqNo)); @@ -2164,9 +2164,9 @@ public void testSeqNoAndCheckpoints() throws IOException { // that the committed max seq no is equivalent to what the current primary seq no is, as all data // we have assigned sequence numbers to should be in the commit equalTo(primarySeqNo)); - assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(primarySeqNo)); - assertThat(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); - assertThat(recoveringEngine.getLocalCheckpointTracker().generateSeqNo(), equalTo(primarySeqNo + 1)); + assertThat(recoveringEngine.getLocalCheckpoint(), equalTo(primarySeqNo)); + assertThat(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo)); + assertThat(generateNewSeqNo(recoveringEngine), equalTo(primarySeqNo + 1)); } } @@ -2469,7 +2469,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + globalCheckpoint.set(engine.getLocalCheckpoint()); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2632,7 +2632,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s engine.recoverFromTranslog(); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + globalCheckpoint.set(engine.getLocalCheckpoint()); throwErrorOnCommit.set(true); FlushFailedEngineException e = expectThrows(FlushFailedEngineException.class, engine::flush); assertThat(e.getCause().getMessage(), equalTo("power's out")); @@ -2690,7 +2690,7 @@ private Path[] filterExtraFSFiles(Path[] files) { } public void testTranslogReplay() throws IOException { - final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpointTracker().getCheckpoint(); + final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); @@ -3625,7 +3625,7 @@ private ToLongBiFunction getStallingSeqNoGenerator( final AtomicBoolean stall, final AtomicLong expectedLocalCheckpoint) { return (engine, operation) -> { - final long seqNo = engine.getLocalCheckpointTracker().generateSeqNo(); + final long seqNo = generateNewSeqNo(engine); final CountDownLatch latch = latchReference.get(); if (stall.get()) { try { @@ -3677,8 +3677,8 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } } - assertThat(initialEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(expectedLocalCheckpoint.get())); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo((long) (docs - 1))); + assertThat(initialEngine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); + assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo((long) (docs - 1))); initialEngine.flush(true, true); latchReference.get().countDown(); @@ -3692,7 +3692,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro try (Engine recoveringEngine = new 
InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(); recoveringEngine.fillSeqNoGaps(2); - assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); + assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } } @@ -3770,7 +3770,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio expectedLocalCheckpoint = numberOfOperations - 1; } - assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(expectedLocalCheckpoint)); + assertThat(engine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } @@ -3800,11 +3800,11 @@ protected long doGenerateSeqNoForOperation(Operation operation) { final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = randomAlphaOfLength(16); noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); - assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1))); + assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); noOpEngine.noOp( new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); - assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 2))); + assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1)); // skip to the op that we added to the translog Translog.Operation op; @@ -3957,17 +3957,17 @@ public void markSeqNoAsCompleted(long seqNo) { actualEngine.rollTranslogGeneration(); } } - final long currentLocalCheckpoint = actualEngine.getLocalCheckpointTracker().getCheckpoint(); + final long currentLocalCheckpoint = actualEngine.getLocalCheckpoint(); final long resetLocalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(currentLocalCheckpoint)); - actualEngine.getLocalCheckpointTracker().resetCheckpoint(resetLocalCheckpoint); + actualEngine.resetLocalCheckpoint(resetLocalCheckpoint); completedSeqNos.clear(); actualEngine.restoreLocalCheckpointFromTranslog(); final Set intersection = new HashSet<>(expectedCompletedSeqNos); intersection.retainAll(LongStream.range(resetLocalCheckpoint + 1, operations).boxed().collect(Collectors.toSet())); assertThat(completedSeqNos, equalTo(intersection)); - assertThat(actualEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(currentLocalCheckpoint)); - assertThat(actualEngine.getLocalCheckpointTracker().generateSeqNo(), equalTo((long) operations)); + assertThat(actualEngine.getLocalCheckpoint(), equalTo(currentLocalCheckpoint)); + assertThat(generateNewSeqNo(actualEngine), equalTo((long) operations)); } finally { IOUtils.close(actualEngine); } @@ -3991,7 +3991,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { replicaEngine.index(replicaIndexForDoc(doc, 1, indexResult.getSeqNo(), false)); } } - checkpointOnReplica = replicaEngine.getLocalCheckpointTracker().getCheckpoint(); + checkpointOnReplica = replicaEngine.getLocalCheckpoint(); } finally { IOUtils.close(replicaEngine); } 
@@ -4001,16 +4001,16 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); Engine recoveringEngine = null; try { - assertEquals(docs - 1, engine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); - assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(docs - 1, engine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(docs - 1, engine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, replicaEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpoint()); trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica, recoveringEngine.fillSeqNoGaps(2)); // now snapshot the tlog and ensure the primary term is updated @@ -4025,10 +4025,10 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { } } - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); if ((flushed = randomBoolean())) { - globalCheckpoint.set(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); + globalCheckpoint.set(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); recoveringEngine.getTranslog().sync(); recoveringEngine.flush(true, true); } @@ -4045,11 +4045,11 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); } recoveringEngine.recoverFromTranslog(); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals(0, recoveringEngine.fillSeqNoGaps(3)); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); } finally { IOUtils.close(recoveringEngine); } @@ -4232,7 +4232,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s // Advance the global checkpoint during the flush to create 
a lag between a persisted global checkpoint in the translog // (this value is visible to the deletion policy) and an in memory global checkpoint in the SequenceNumbersService. if (rarely()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getLocalCheckpointTracker().getCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getLocalCheckpoint())); } super.commitIndexWriter(writer, translog, syncId); } @@ -4244,7 +4244,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); engine.index(indexForDoc(testParsedDocument(Integer.toString(docId), null, document, B_1, null))); if (frequently()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); engine.getTranslog().sync(); } if (frequently()) { @@ -4378,11 +4378,11 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { engine.flush(false, randomBoolean()); List commits = DirectoryReader.listCommits(store.directory()); // Global checkpoint advanced but not enough - all commits are kept. - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint() - 1)); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint() - 1)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); // Global checkpoint advanced enough - only the last commit is kept. - globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpointTracker().getCheckpoint(), Long.MAX_VALUE)); + globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); } @@ -4406,7 +4406,7 @@ public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { for (int i = 0; i < numSnapshots; i++) { snapshots.add(engine.acquireSafeIndexCommit()); // taking snapshots from the safe commit. } - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + globalCheckpoint.set(engine.getLocalCheckpoint()); engine.syncTranslog(); final List commits = DirectoryReader.listCommits(store.directory()); for (int i = 0; i < numSnapshots - 1; i++) { @@ -4456,13 +4456,13 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); // If the new index commit still points to the same translog generation as the current index commit, // we should not enable the periodically flush condition; otherwise we can get into an infinite loop of flushes. 
-        engine.getLocalCheckpointTracker().generateSeqNo(); // create a gap here
+        generateNewSeqNo(engine); // create a gap here
         for (int id = 0; id < numDocs; id++) {
             if (randomBoolean()) {
                 translog.rollGeneration();
             }
             final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null);
-            engine.index(replicaIndexForDoc(doc, 2L, engine.getLocalCheckpointTracker().generateSeqNo(), false));
+            engine.index(replicaIndexForDoc(doc, 2L, generateNewSeqNo(engine), false));
             if (engine.shouldPeriodicallyFlush()) {
                 engine.flush();
                 assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo)));
@@ -4483,7 +4483,7 @@ public void testStressShouldPeriodicallyFlush() throws Exception {
         engine.onSettingsChanged();
         final int numOps = scaledRandomIntBetween(100, 10_000);
         for (int i = 0; i < numOps; i++) {
-            final long localCheckPoint = engine.getLocalCheckpointTracker().getCheckpoint();
+            final long localCheckPoint = engine.getLocalCheckpoint();
             final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5);
             final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null);
             engine.index(replicaIndexForDoc(doc, 1L, seqno, false));
@@ -4570,9 +4570,9 @@ public void testPruneOnlyDeletesAtMostLocalCheckpoint() throws Exception {
         }
         final long deleteBatch = between(10, 20);
         final long gapSeqNo = randomLongBetween(
-            engine.getLocalCheckpointTracker().getMaxSeqNo() + 1, engine.getLocalCheckpointTracker().getMaxSeqNo() + deleteBatch);
+            engine.getSeqNoStats(-1).getMaxSeqNo() + 1, engine.getSeqNoStats(-1).getMaxSeqNo() + deleteBatch);
         for (int i = 0; i < deleteBatch; i++) {
-            final long seqno = engine.getLocalCheckpointTracker().generateSeqNo();
+            final long seqno = generateNewSeqNo(engine);
             if (seqno != gapSeqNo) {
                 if (randomBoolean()) {
                     clock.incrementAndGet();
@@ -4619,7 +4619,7 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null);
             if (randomBoolean()) {
-                engine.index(appendOnlyReplica(doc, randomBoolean(), 1, engine.getLocalCheckpointTracker().generateSeqNo()));
+                engine.index(appendOnlyReplica(doc, randomBoolean(), 1, generateNewSeqNo(engine)));
             } else {
                 engine.index(appendOnlyPrimary(doc, randomBoolean(), randomNonNegativeLong()));
             }
@@ -4636,7 +4636,7 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
             for (int i = 0; i < numOps; i++) {
                 ParsedDocument parsedDocument = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null);
                 if (randomBoolean()) { // On replica - update max_seqno for non-append-only operations
-                    final long seqno = engine.getLocalCheckpointTracker().generateSeqNo();
+                    final long seqno = generateNewSeqNo(engine);
                     final Engine.Index doc = replicaIndexForDoc(parsedDocument, 1, seqno, randomBoolean());
                     if (randomBoolean()) {
                         engine.index(doc);
@@ -4655,7 +4655,7 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
         }
         appendOnlyIndexer.join(120_000);
         assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(maxSeqNoOfNonAppendOnly));
-        globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint());
+        globalCheckpoint.set(engine.getLocalCheckpoint());
         engine.syncTranslog();
         engine.flush();
     }
@@ -4667,15 +4667,14 @@ public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception {
         long lookupTimes = 0L;
-        final LocalCheckpointTracker localCheckpointTracker = engine.getLocalCheckpointTracker();
         final int initDocs = between(0, 10);
         for (int i = 0; i < initDocs; i++) {
             index(engine, i);
             lookupTimes++;
         }
         // doc1 is delayed and arrived after a non-append-only op.
-        final long seqNoAppendOnly1 = localCheckpointTracker.generateSeqNo();
-        final long seqnoNormalOp = localCheckpointTracker.generateSeqNo();
+        final long seqNoAppendOnly1 = generateNewSeqNo(engine);
+        final long seqnoNormalOp = generateNewSeqNo(engine);
         if (randomBoolean()) {
             engine.index(replicaIndexForDoc(
                 testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, seqnoNormalOp, false));
@@ -4694,7 +4693,7 @@ public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception {
         // optimize for other append-only 2 (its seqno > max_seqno of non-append-only) - do not look up in version map.
         engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, testDocumentWithTextField(), SOURCE, null),
-            false, randomNonNegativeLong(), localCheckpointTracker.generateSeqNo()));
+            false, randomNonNegativeLong(), generateNewSeqNo(engine)));
         assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes));
     }

diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index a3a4101d29c00..8a3cf73554d1b 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -73,6 +73,7 @@
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngine;
 import org.elasticsearch.index.engine.InternalEngineFactory;
 import org.elasticsearch.index.engine.Segment;
@@ -845,7 +846,7 @@ public void testGlobalCheckpointSync() throws IOException {
         recoverReplica(replicaShard, primaryShard);
         final int maxSeqNo = randomIntBetween(0, 128);
         for (int i = 0; i <= maxSeqNo; i++) {
-            primaryShard.getEngine().getLocalCheckpointTracker().generateSeqNo();
+            EngineTestCase.generateNewSeqNo(primaryShard.getEngine());
         }
         final long checkpoint = rarely() ? maxSeqNo - scaledRandomIntBetween(0, maxSeqNo) : maxSeqNo;
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 47f1fbbdd633a..69daa2be26feb 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -77,6 +77,7 @@
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
@@ -3334,7 +3335,7 @@ public void testSnapshottingWithMissingSequenceNumbers() {
         final Index index = resolveIndex(indexName);
         final IndexShard primary = internalCluster().getInstance(IndicesService.class, dataNode).getShardOrNull(new ShardId(index, 0));
         // create a gap in the sequence numbers
-        getEngineFromShard(primary).getLocalCheckpointTracker().generateSeqNo();
+        EngineTestCase.generateNewSeqNo(getEngineFromShard(primary));

         for (int i = 5; i < 10; i++) {
             index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
index 8fff17900b072..0d5e693d62da6 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
@@ -386,6 +386,15 @@ public interface IndexWriterFactory {
         IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException;
     }

+    /**
+     * Generate a new sequence number and return it. Only works on InternalEngines
+     */
+    public static long generateNewSeqNo(final Engine engine) {
+        assert engine instanceof InternalEngine : "expected InternalEngine, got: " + engine.getClass();
+        InternalEngine internalEngine = (InternalEngine) engine;
+        return internalEngine.getLocalCheckpointTracker().generateSeqNo();
+    }
+
     public static InternalEngine createInternalEngine(
             @Nullable final IndexWriterFactory indexWriterFactory,
             @Nullable final BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
index 6bd09e6933998..81d6ab6ca5238 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
@@ -572,7 +572,7 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id,
                     IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
             }
             shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(),
-                shard.getEngine().getLocalCheckpointTracker().getCheckpoint());
+                shard.getLocalCheckpoint());
         } else {
             result = shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0, VersionType.EXTERNAL,
                 IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
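As an aside, the new helper centralizes what these tests previously did by reaching into the engine's LocalCheckpointTracker directly. A minimal, hypothetical sketch of how a test can use it to punch a gap into the sequence-number stream follows; the test class name and assertion are invented for illustration, and it assumes the `engine` field plus the `testParsedDocument`, `testDocumentWithTextField`, and `replicaIndexForDoc` helpers used by the patched tests are visible to an EngineTestCase subclass:

    import static org.hamcrest.Matchers.lessThan;

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.index.engine.EngineTestCase;
    import org.elasticsearch.index.mapper.ParsedDocument;

    public class SeqNoGapIllustrationTests extends EngineTestCase {

        public void testGapHoldsBackLocalCheckpoint() throws Exception {
            // Consume a sequence number without indexing an operation under it,
            // leaving a gap in the sequence-number stream.
            final long gapSeqNo = generateNewSeqNo(engine);

            // Index a replica operation above the gap; the local checkpoint
            // cannot advance past the missing sequence number.
            final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
            engine.index(replicaIndexForDoc(doc, 1L, generateNewSeqNo(engine), false));

            assertThat(engine.getLocalCheckpoint(), lessThan(gapSeqNo));
        }
    }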
From e6303e46358ec95b5a7430644d151d9309da8203 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sat, 9 Jun 2018 07:28:41 -0400
Subject: [PATCH 21/24] Fix unknown licenses (#31223)

The goal of this commit is to address unknown licenses when producing
the dependencies info report. We have two different checks that we run
on licenses. The first check is whether or not we have stashed a copy of
the license text for a dependency in the repository. The second is to
map every dependency to a license type (e.g., BSD 3-clause). The problem
here is that the way we were handling licenses in the second check
differs from how we handle licenses in the first check. The first check
works by finding a license file with the name of the artifact followed
by the text -LICENSE.txt. Yet in some cases we allow mapping an artifact
name to another name used to check for the license (e.g., we map
lucene-.* to lucene, and opensaml-.* to shibboleth). The second check
understood the first way of looking for a license file but not the
second way. So in this commit we teach the second check about the
mappings from artifact names to license names. We do this by copying the
configuration from the dependencyLicenses task to the dependenciesInfo
task and then reusing the code from the first check in the second check.

There were some other challenges here though. For example,
dependenciesInfo was checking too many dependencies. For now, we should
only be checking direct dependencies and leaving transitive dependencies
from another org.elasticsearch artifact to that artifact (we want to do
this differently in a follow-up). We also want to disable
dependenciesInfo for projects that we do not publish; users only care
about licenses they might be exposed to if they use our assembled
products.

With all of the changes in this commit we have eliminated all unknown
licenses. A follow-up will enforce that when we add a new dependency it
does not get mapped to unknown; these will be forbidden in the future.
Therefore, with this change and earlier changes we are left with no
unknown licenses and two custom licenses; custom here means it does not
map to an SPDX license type. Those two licenses are xz and ldapsdk. A
future change will not allow additional custom licenses unless they are
explicitly whitelisted. This ensures that if a new dependency is added
it is mapped to an SPDX license or mapped to custom because it does not
have an SPDX license.
---
 build.gradle                                    |  7 ++-
 .../elasticsearch/gradle/BuildPlugin.groovy     |  6 ++-
 .../gradle/DependenciesInfoTask.groovy          | 47 +++++++++++++++----
 .../precommit/DependencyLicensesTask.groovy     | 35 ++++++++------
 .../build.gradle                                |  3 ++
 distribution/build.gradle                       |  5 ++
 test/fixtures/example-fixture/build.gradle      |  2 +
 x-pack/qa/build.gradle                          |  4 ++
 x-pack/qa/sql/build.gradle                      |  1 +
 x-pack/test/feature-aware/build.gradle          |  1 +
 10 files changed, 87 insertions(+), 24 deletions(-)

diff --git a/build.gradle b/build.gradle
index fd7ac8ebbeec8..85567aed2ce5f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -537,7 +537,7 @@ subprojects { project ->
   }
 }

-/* Remove assemble on all qa projects because we don't need to publish
+/* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish
  * artifacts for them. */
 gradle.projectsEvaluated {
   subprojects {
@@ -547,6 +547,11 @@
       project.tasks.remove(assemble)
       project.build.dependsOn.remove('assemble')
     }
+    Task dependenciesInfo = project.tasks.findByName('dependenciesInfo')
+    if (dependenciesInfo) {
+      project.tasks.remove(dependenciesInfo)
+      project.precommit.dependsOn.remove('dependenciesInfo')
+    }
   }
  }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 9cc5bb82552ab..085c4c5cbc7ec 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -761,6 +761,10 @@ class BuildPlugin implements Plugin {
     private static configureDependenciesInfo(Project project) {
         Task deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask.class)
-        deps.dependencies = project.configurations.compile.allDependencies
+        deps.runtimeConfiguration = project.configurations.runtime
+        deps.compileOnlyConfiguration = project.configurations.compileOnly
+        project.afterEvaluate {
+            deps.mappings = project.dependencyLicenses.mappings
+        }
     }
 }
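The DependenciesInfoTask diff that follows resolves the compileOnly configuration into a set of group:artifactId:version coordinates and drops those, plus anything in an org.elasticsearch group, from the report. A rough, self-contained Java sketch of that filtering idea, under the assumption that dependencies can be reduced to coordinate strings (the sample coordinates are invented; the real task iterates Gradle Dependency objects as the diff below shows):

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class CompileOnlyFilterSketch {

        public static void main(String[] args) {
            // Invented sample data standing in for resolved Gradle configurations.
            List<String[]> runtime = List.of(
                    new String[] {"org.apache.lucene", "lucene-core", "7.4.0"},
                    new String[] {"com.example", "compile-only-lib", "1.0"});
            Set<String> compileOnly = Set.of("com.example:compile-only-lib:1.0");

            Set<String> reportable = new LinkedHashSet<>();
            for (String[] dep : runtime) {
                String gav = dep[0] + ":" + dep[1] + ":" + dep[2];
                if (compileOnly.contains(gav)) {
                    continue; // compile-only dependencies are not shipped, so skip them
                }
                if (dep[0].contains("org.elasticsearch")) {
                    continue; // internal artifacts report their own dependencies
                }
                reportable.add(gav);
            }
            System.out.println(reportable); // [org.apache.lucene:lucene-core:7.4.0]
        }
    }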
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
index b42e6cc8e3caa..e62fe4db954c5 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
@@ -19,14 +19,19 @@

 package org.elasticsearch.gradle

+import org.elasticsearch.gradle.precommit.DependencyLicensesTask
 import org.gradle.api.DefaultTask
+import org.gradle.api.artifacts.Configuration
 import org.gradle.api.artifacts.Dependency
+import org.gradle.api.artifacts.DependencyResolutionListener
 import org.gradle.api.artifacts.DependencySet
 import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputDirectory
 import org.gradle.api.tasks.OutputFile
 import org.gradle.api.tasks.TaskAction

+import java.util.regex.Matcher
+import java.util.regex.Pattern
+
 /**
  * A task to gather information about the dependencies and export them into a csv file.
@@ -44,7 +49,14 @@ public class DependenciesInfoTask extends DefaultTask {

     /** Dependencies to gather information from. */
     @Input
-    public DependencySet dependencies
+    public Configuration runtimeConfiguration
+
+    /** We subtract compile-only dependencies. */
+    @Input
+    public Configuration compileOnlyConfiguration
+
+    @Input
+    public LinkedHashMap mappings

     /** Directory to read license files */
     @InputDirectory
@@ -59,15 +71,34 @@ public class DependenciesInfoTask extends DefaultTask {

     @TaskAction
     public void generateDependenciesInfo() {
+
+        final DependencySet runtimeDependencies = runtimeConfiguration.getAllDependencies()
+        // we have to resolve the transitive dependencies and create a group:artifactId:version map
+        final Set compileOnlyArtifacts =
+            compileOnlyConfiguration
+                .getResolvedConfiguration()
+                .resolvedArtifacts
+                .collect { it -> "${it.moduleVersion.id.group}:${it.moduleVersion.id.name}:${it.moduleVersion.id.version}" }
+
         final StringBuilder output = new StringBuilder()

-        for (Dependency dependency : dependencies) {
-            // Only external dependencies are checked
-            if (dependency.group != null && dependency.group.contains("elasticsearch") == false) {
-                final String url = createURL(dependency.group, dependency.name, dependency.version)
-                final String licenseType = getLicenseType(dependency.group, dependency.name)
-                output.append("${dependency.group}:${dependency.name},${dependency.version},${url},${licenseType}\n")
+        for (final Dependency dependency : runtimeDependencies) {
+            // we do not need compile-only dependencies here
+            if (compileOnlyArtifacts.contains("${dependency.group}:${dependency.name}:${dependency.version}")) {
+                continue
+            }
+            // only external dependencies are checked
+            if (dependency.group != null && dependency.group.contains("org.elasticsearch")) {
+                continue
             }
+
+            final String url = createURL(dependency.group, dependency.name, dependency.version)
+            final String dependencyName = DependencyLicensesTask.getDependencyName(mappings, dependency.name)
+            logger.info("mapped dependency ${dependency.group}:${dependency.name} to ${dependencyName} for license info")
+
+            final String licenseType = getLicenseType(dependency.group, dependencyName)
+            output.append("${dependency.group}:${dependency.name},${dependency.version},${url},${licenseType}\n")
+        }
         outputFile.setText(output.toString(), 'UTF-8')
     }
@@ -173,7 +204,7 @@ are met:
        derived from this software without specific prior written permission\\.|
        (3\\.)? Neither the name of .+ nor the names of its
        contributors may be used to endorse or promote products derived from
-       this software without specific prior written permission\\.)
+       this software without specific prior written permission\\.)

        THIS SOFTWARE IS PROVIDED BY .+ (``|''|")AS IS(''|") AND ANY EXPRESS OR
        IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
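Both checks now funnel through DependencyLicensesTask.getDependencyName, whose Groovy implementation appears in the next diff. For readers tracing the mapping behavior, here is a self-contained Java re-expression of that lookup; the sample mappings echo the lucene/shibboleth examples from the commit message, and everything else is illustrative rather than part of the patch:

    import java.util.LinkedHashMap;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class DependencyNameMappingSketch {

        static String getDependencyName(LinkedHashMap<String, String> mappings, String dependencyName) {
            // Iteration order of keys and values matches because the map is linked.
            String[] mapped = mappings.values().toArray(new String[0]);
            // Build one alternation with a capturing group per mapping key.
            Pattern mappingsPattern = Pattern.compile("(" + String.join(")|(", mappings.keySet()) + ")");
            Matcher match = mappingsPattern.matcher(dependencyName);
            if (match.matches()) {
                // Find the first alternation group that actually matched.
                int i = 0;
                while (i < match.groupCount() && match.group(i + 1) == null) {
                    i++;
                }
                return mapped[i];
            }
            return dependencyName;
        }

        public static void main(String[] args) {
            LinkedHashMap<String, String> mappings = new LinkedHashMap<>();
            mappings.put("lucene-.*", "lucene");       // example from the commit message
            mappings.put("opensaml-.*", "shibboleth"); // example from the commit message
            System.out.println(getDependencyName(mappings, "lucene-core"));   // lucene
            System.out.println(getDependencyName(mappings, "opensaml-core")); // shibboleth
            System.out.println(getDependencyName(mappings, "jna"));           // jna (unmapped)
        }
    }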
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
index df30326a59d79..04fb023e2051a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
@@ -109,6 +109,10 @@
         mappings.put(from, to)
     }

+    public LinkedHashMap getMappings() {
+        return new LinkedHashMap<>(mappings)
+    }
+
     /**
      * Add a rule which will skip SHA checking for the given dependency name. This should be used for
      * locally build dependencies, which cause the sha to change constantly.
@@ -129,10 +133,6 @@
             throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies")
         }

-        // order is the same for keys and values iteration since we use a linked hashmap
-        List mapped = new ArrayList<>(mappings.values())
-        Pattern mappingsPattern = Pattern.compile('(' + mappings.keySet().join(')|(') + ')')
-
         Map licenses = new HashMap<>()
         Map notices = new HashMap<>()
         Set shaFiles = new HashSet()
@@ -162,16 +162,10 @@
                 checkSha(dependency, jarName, shaFiles)
             }

-            logger.info("Checking license/notice for " + depName)
-            Matcher match = mappingsPattern.matcher(depName)
-            if (match.matches()) {
-                int i = 0
-                while (i < match.groupCount() && match.group(i + 1) == null) ++i;
-                logger.info("Mapped dependency name ${depName} to ${mapped.get(i)} for license check")
-                depName = mapped.get(i)
-            }
-            checkFile(depName, jarName, licenses, 'LICENSE')
-            checkFile(depName, jarName, notices, 'NOTICE')
+            final String dependencyName = getDependencyName(mappings, depName)
+            logger.info("mapped dependency name ${depName} to ${dependencyName} for license/notice check")
+            checkFile(dependencyName, jarName, licenses, 'LICENSE')
+            checkFile(dependencyName, jarName, notices, 'NOTICE')
         }

         licenses.each { license, count ->
@@ -189,6 +183,19 @@
         }
     }

+    public static String getDependencyName(final LinkedHashMap mappings, final String dependencyName) {
+        // order is the same for keys and values iteration since we use a linked hashmap
+        List mapped = new ArrayList<>(mappings.values())
+        Pattern mappingsPattern = Pattern.compile('(' + mappings.keySet().join(')|(') + ')')
+        Matcher match = mappingsPattern.matcher(dependencyName)
+        if (match.matches()) {
+            int i = 0
+            while (i < match.groupCount() && match.group(i + 1) == null) ++i;
+            return mapped.get(i)
+        }
+        return dependencyName
+    }
+
     private File getShaFile(String jarName) {
         return new File(licensesDir, jarName + SHA_EXTENSION)
     }
diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle
index bee41034c3ce5..cc84207d90d22 100644
--- a/client/client-benchmark-noop-api-plugin/build.gradle
+++ b/client/client-benchmark-noop-api-plugin/build.gradle
@@ -31,6 +31,9 @@ esplugin {
 tasks.remove(assemble)
 build.dependsOn.remove('assemble')

+dependencyLicenses.enabled = false
+dependenciesInfo.enabled = false
+
 compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"

 // no unit tests
diff --git a/distribution/build.gradle b/distribution/build.gradle
index fa62513a54069..7f08c244f456d 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -32,6 +32,11 @@ Collection distributions = project('archives').subprojects + project('packages')

 // Concatenates the dependencies CSV files into a single file
 task generateDependenciesReport(type: ConcatFilesTask) {
+  project.rootProject.allprojects {
+    afterEvaluate {
+      if (it.tasks.findByName("dependenciesInfo")) dependsOn it.tasks.dependenciesInfo
+    }
+  }
   files = fileTree(dir: project.rootDir, include: '**/dependencies.csv' )
   headerLine = "name,version,url,license"
   target = new File(System.getProperty('csv')?: "${project.buildDir}/dependencies/es-dependencies.csv")
diff --git a/test/fixtures/example-fixture/build.gradle b/test/fixtures/example-fixture/build.gradle
index 225a2cf9deba6..ce562e89abb7f 100644
--- a/test/fixtures/example-fixture/build.gradle
+++ b/test/fixtures/example-fixture/build.gradle
@@ -22,3 +22,5 @@ test.enabled = false
 // Not published so no need to assemble
 tasks.remove(assemble)
 build.dependsOn.remove('assemble')
+
+dependenciesInfo.enabled = false
diff --git a/x-pack/qa/build.gradle b/x-pack/qa/build.gradle
index 1570b218592fe..24b6618b7d8f6 100644
--- a/x-pack/qa/build.gradle
+++ b/x-pack/qa/build.gradle
@@ -28,5 +28,9 @@ gradle.projectsEvaluated {
       project.tasks.remove(assemble)
       project.build.dependsOn.remove('assemble')
     }
+    Task dependenciesInfo = project.tasks.findByName('dependenciesInfo')
+    if (dependenciesInfo) {
+      project.precommit.dependsOn.remove('dependenciesInfo')
+    }
   }
 }
diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle
index a3c147bbc04fc..0bea3a9364b71 100644
--- a/x-pack/qa/sql/build.gradle
+++ b/x-pack/qa/sql/build.gradle
@@ -22,6 +22,7 @@ dependencies {
 test.enabled = false

 dependencyLicenses.enabled = false
+dependenciesInfo.enabled = false

 // the main files are actually test files, so use the appropriate forbidden api sigs
 forbiddenApisMain {
diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle
index 217ed25a2d4b1..11b0e67183c8f 100644
--- a/x-pack/test/feature-aware/build.gradle
+++ b/x-pack/test/feature-aware/build.gradle
@@ -10,6 +10,7 @@ dependencies {
 forbiddenApisMain.enabled = true

 dependencyLicenses.enabled = false
+dependenciesInfo.enabled = false

 jarHell.enabled = false

From 219a3d7a8d08fa5ac129760b1b2345e8fcb64138 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sat, 9 Jun 2018 09:46:57 -0400
Subject: [PATCH 22/24] Add recognition of MPL 2.0 (#31226)

This commit adds the ability for the dependencies info check to
recognize version 2.0 of the Mozilla Public License.
---
 .../org/elasticsearch/gradle/DependenciesInfoTask.groovy | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
index e62fe4db954c5..13e457c031706 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy
@@ -245,6 +245,8 @@ SOFTWARE\\.

         final String MOZILLA_1_1 = "Mozilla Public License.*Version 1.1"

+        final String MOZILLA_2_0 = "Mozilla\\s*Public\\s*License\\s*Version\\s*2\\.0"
+
         switch (licenseText) {
             case ~/.*${APACHE_2_0}.*/:
                 spdx = 'Apache-2.0'
                 break
@@ -273,6 +275,9 @@ SOFTWARE\\.
             case ~/.*${MOZILLA_1_1}.*/:
                 spdx = 'MPL-1.1'
                 break
+            case ~/.*${MOZILLA_2_0}.*/:
+                spdx = 'MPL-2.0'
+                break
             default:
                 break
         }
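To see what the new pattern accepts, here is a small, hypothetical Java check using the regular expression from the hunk above; the sample license header and the DOTALL flag (so that `.*` can span the lines around the title) are editorial choices, not part of the patch:

    import java.util.regex.Pattern;

    public class MplDetectionSketch {

        public static void main(String[] args) {
            // Regex copied from the patch; \s* also tolerates line breaks between
            // the words, which matters when a license file wraps the title.
            String MOZILLA_2_0 = "Mozilla\\s*Public\\s*License\\s*Version\\s*2\\.0";
            Pattern pattern = Pattern.compile(".*" + MOZILLA_2_0 + ".*", Pattern.DOTALL);

            // Invented sample text resembling a typical MPL 2.0 header.
            String licenseText = "Mozilla Public License Version 2.0\n"
                    + "==================================\n"
                    + "1. Definitions ...";

            String spdx = pattern.matcher(licenseText).matches() ? "MPL-2.0" : "UNKNOWN";
            System.out.println(spdx); // MPL-2.0
        }
    }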
From e371df285697fd91fdd66e63a42d8da4740fc6af Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sat, 9 Jun 2018 09:50:24 -0400
Subject: [PATCH 23/24] Remove dependencies report task dependencies (#31227)

A previous commit tried to add task dependencies for the
:distribution:generateDependenciesReport task so that a user did not
have to run "dependenciesInfo :distribution:generateDependenciesReport".
However, this method did not reliably add all task dependencies due to
task ordering issues in previous versions of Gradle and our build. This
commit removes this for now, and a user will continue to have to run
"dependenciesInfo :distribution:generateDependenciesReport".
---
 distribution/build.gradle | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/distribution/build.gradle b/distribution/build.gradle
index 7f08c244f456d..fa62513a54069 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -32,11 +32,6 @@ Collection distributions = project('archives').subprojects + project('packages')

 // Concatenates the dependencies CSV files into a single file
 task generateDependenciesReport(type: ConcatFilesTask) {
-  project.rootProject.allprojects {
-    afterEvaluate {
-      if (it.tasks.findByName("dependenciesInfo")) dependsOn it.tasks.dependenciesInfo
-    }
-  }
   files = fileTree(dir: project.rootDir, include: '**/dependencies.csv' )
   headerLine = "name,version,url,license"
   target = new File(System.getProperty('csv')?: "${project.buildDir}/dependencies/es-dependencies.csv")

From 33d59846b5df14d5660bf6775102249b8f9bc001 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sat, 9 Jun 2018 09:50:36 -0400
Subject: [PATCH 24/24] Move default location of dependencies report (#31228)

This commit moves the default location of the full dependencies report
to be under the reports directory to align it with the location for the
dependenciesInfo task output.
---
 distribution/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/distribution/build.gradle b/distribution/build.gradle
index fa62513a54069..068c8da480f11 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -34,7 +34,7 @@ Collection distributions = project('archives').subprojects + project('packages')
 task generateDependenciesReport(type: ConcatFilesTask) {
   files = fileTree(dir: project.rootDir, include: '**/dependencies.csv' )
   headerLine = "name,version,url,license"
-  target = new File(System.getProperty('csv')?: "${project.buildDir}/dependencies/es-dependencies.csv")
+  target = new File(System.getProperty('csv')?: "${project.buildDir}/reports/dependencies/es-dependencies.csv")
 }

/*****************************************************************************