diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 03fa840f57b5e..86525912ec0a9 100644
--- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -214,7 +214,7 @@ public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexS
         this.mergeInterval = indexSettings.getAsTime("index.merge.async_interval", TimeValue.timeValueSeconds(1));
 
         /* create engine config */
         this.config = new EngineConfig(shardId,
-                indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, true),
+                indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false),
                 threadPool,indexingService,indexSettingsService, warmer, store, deletionPolicy, translog, mergePolicyProvider,
                 mergeScheduler, analysisService.defaultIndexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener);
diff --git a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
index c34ae77a17479..1cd81af05ca68 100644
--- a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
@@ -30,8 +30,10 @@
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
+import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.elasticsearch.ExceptionsHelper;
@@ -53,6 +55,7 @@
 import org.elasticsearch.index.engine.*;
 import org.elasticsearch.index.indexing.ShardIndexingService;
 import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
@@ -238,7 +241,7 @@ protected Engine createEngine(IndexSettingsService indexSettingsService, Store s
 
     public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) {
         IndexWriterConfig iwc = newIndexWriterConfig(Lucene.STANDARD_ANALYZER);
-        EngineConfig config = new EngineConfig(shardId, true, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService
+        EngineConfig config = new EngineConfig(shardId, false/*per default optimization for auto generated ids is disabled*/, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService
                 , null, store, createSnapshotDeletionPolicy(), translog, createMergePolicy(), mergeSchedulerProvider, iwc.getAnalyzer(), iwc.getSimilarity()
                 , new CodecService(shardId.index()), new Engine.FailedEngineListener() {
             @Override
@@ -1522,4 +1525,88 @@ public void testSettings() {
             assertEquals(currentIndexWriterConfig.getCheckIntegrityAtMerge(), checksumOnMerge);
         }
     }
+
+    @Test
+    public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException {
+
+        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+        boolean canHaveDuplicates = false;
+        boolean autoGeneratedId = true;
+
+        Engine.Create index = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        engine.create(index);
+        assertThat(index.version(), equalTo(1l));
+
+        index = new Engine.Create(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        replicaEngine.create(index);
+        assertThat(index.version(), equalTo(1l));
+
+        canHaveDuplicates = true;
+        index = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        engine.create(index);
+        assertThat(index.version(), equalTo(1l));
+        engine.refresh("test", true);
+        Engine.Searcher searcher = engine.acquireSearcher("test");
+        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+        assertThat(topDocs.totalHits, equalTo(1));
+
+        index = new Engine.Create(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        try {
+            replicaEngine.create(index);
+            fail();
+        } catch (VersionConflictEngineException e) {
+            // we ignore version conflicts on replicas, see TransportShardReplicationOperationAction.ignoreReplicaException
+        }
+        replicaEngine.refresh("test", true);
+        Engine.Searcher replicaSearcher = replicaEngine.acquireSearcher("test");
+        topDocs = replicaSearcher.searcher().search(new MatchAllDocsQuery(), 10);
+        assertThat(topDocs.totalHits, equalTo(1));
+        searcher.close();
+        replicaSearcher.close();
+    }
+
+    @Test
+    public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException {
+
+        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+        boolean canHaveDuplicates = true;
+        boolean autoGeneratedId = true;
+
+        Engine.Create firstIndexRequest = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        engine.create(firstIndexRequest);
+        assertThat(firstIndexRequest.version(), equalTo(1l));
+
+        Engine.Create firstIndexRequestReplica = new Engine.Create(null, newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        replicaEngine.create(firstIndexRequestReplica);
+        assertThat(firstIndexRequestReplica.version(), equalTo(1l));
+
+        canHaveDuplicates = false;
+        Engine.Create secondIndexRequest = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        try {
+            engine.create(secondIndexRequest);
+            fail();
+        } catch (DocumentAlreadyExistsException e) {
+            // we can ignore the exception. In case this happens because the retry request arrived first then this error will not be sent back anyway.
+            // in any other case this is an actual error
+        }
+        engine.refresh("test", true);
+        Engine.Searcher searcher = engine.acquireSearcher("test");
+        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+        assertThat(topDocs.totalHits, equalTo(1));
+
+        Engine.Create secondIndexRequestReplica = new Engine.Create(null, newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+        try {
+            replicaEngine.create(secondIndexRequestReplica);
+            fail();
+        } catch (VersionConflictEngineException e) {
+            // we ignore version conflicts on replicas, see TransportShardReplicationOperationAction.ignoreReplicaException.
+        }
+        replicaEngine.refresh("test", true);
+        Engine.Searcher replicaSearcher = replicaEngine.acquireSearcher("test");
+        topDocs = replicaSearcher.searcher().search(new MatchAllDocsQuery(), 10);
+        assertThat(topDocs.totalHits, equalTo(1));
+        searcher.close();
+        replicaSearcher.close();
+    }
+
 }
diff --git a/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java b/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java
index b9455dd1a4dc6..e3248c37231f5 100644
--- a/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java
+++ b/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java
@@ -67,7 +67,6 @@ protected Settings nodeSettings(int nodeOrdinal) {
      * see https://github.com/elasticsearch/elasticsearch/issues/8788
      */
     @Test
-    @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/8788")
     public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
        final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
        int numDocs = scaledRandomIntBetween(100, 1000);
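Note: with this change the auto generated id optimization is off by default, but it can still be enabled per index, since IndexShard keeps reading EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING from the index settings. The snippet below is a minimal sketch of opting back in at index creation time; it assumes the 1.x Java client API, and the index name "test" and the example class are illustrative only, not part of this patch.

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.index.engine.EngineConfig;

public class OptimizeAutoGeneratedIdExample {

    // Illustrative only: creates an index that explicitly re-enables the
    // auto generated id optimization, which this patch disables by default.
    static void createIndexWithOptimizationEnabled(Client client) {
        client.admin().indices().prepareCreate("test")
                .setSettings(ImmutableSettings.settingsBuilder()
                        .put(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, true))
                .get();
    }
}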