indices, long
*
* As the snapshot process progresses, implementations of this method should update the {@link IndexShardSnapshotStatus} object and check
* {@link IndexShardSnapshotStatus#isAborted()} to see whether the snapshot process should be aborted.
- * @param shard shard to be snapshotted
+ * @param indexShard the shard to be snapshotted
+ * @param snapshotId snapshot id
+ * @param indexId id for the index being snapshotted
+ * @param snapshotIndexCommit commit point
+ * @param snapshotStatus snapshot status
+ * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead
+ */
+ @Deprecated
+ default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
+ IndexShardSnapshotStatus snapshotStatus) {
+ snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus);
+ }
+
+ /**
+ * Creates a snapshot of the shard based on the index commit point.
+ *
+ * The index commit point can be obtained by using the {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method.
+ * Repository implementations shouldn't release the snapshot index commit point; that is the caller's responsibility.
+ *
+ * As the snapshot process progresses, implementations of this method should update the {@link IndexShardSnapshotStatus} object and check
+ * {@link IndexShardSnapshotStatus#isAborted()} to see whether the snapshot process should be aborted.
* @param store store to be snapshotted
+ * @param mapperService the shard's mapper service
* @param snapshotId snapshot id
* @param indexId id for the index being snapshotted
* @param snapshotIndexCommit commit point
* @param snapshotStatus snapshot status
*/
- void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
+ void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
IndexShardSnapshotStatus snapshotStatus);
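As an illustration of the abort contract described above, here is a minimal sketch (not part of this patch; everything except the Repository API types is an assumption) of an implementation polling the status object between file copies:

    import java.io.IOException;
    import org.apache.lucene.index.IndexCommit;
    import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;

    // Sketch: walk the commit's files, checking for an abort between copies as the
    // javadoc above requires. The actual copy and progress bookkeeping are elided.
    void copyCommitFiles(IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) throws IOException {
        for (String fileName : snapshotIndexCommit.getFileNames()) {
            if (snapshotStatus.isAborted()) {
                throw new IllegalStateException("snapshot aborted while copying [" + fileName + "]");
            }
            // ... copy fileName into repository storage and update snapshotStatus counters ...
        }
    }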
/**
* Restores a snapshot of the shard.
*
* The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied.
- *
* @param shard the shard to restore the index into
+ * @param store the store to restore the index into
+ * @param snapshotId snapshot id
+ * @param version version of elasticsearch that created this snapshot
+ * @param indexId id of the index in the repository from which the restore is occurring
+ * @param snapshotShardId shard id (in the snapshot)
+ * @param recoveryState recovery state
+ * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead
+ */
+ @Deprecated
+ default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId,
+ ShardId snapshotShardId, RecoveryState recoveryState) {
+ restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState);
+ }
+
+ /**
+ * Restores a snapshot of the shard.
+ *
+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied.
+ * @param store the store to restore the index into
* @param snapshotId snapshot id
* @param version version of elasticsearch that created this snapshot
* @param indexId id of the index in the repository from which the restore is occurring
* @param snapshotShardId shard id (in the snapshot)
* @param recoveryState recovery state
*/
- void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId,
- ShardId snapshotShardId, RecoveryState recoveryState);
+ void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
+ RecoveryState recoveryState);
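To make the rename semantics concrete, a hypothetical caller sketch (index name and UUID are assumptions): the restore target is implied by store.shardId(), while snapshotShardId addresses the shard inside the snapshot, so the two may legitimately differ after a rename:

    import org.elasticsearch.Version;
    import org.elasticsearch.index.Index;
    import org.elasticsearch.index.shard.ShardId;
    import org.elasticsearch.index.store.Store;
    import org.elasticsearch.indices.recovery.RecoveryState;
    import org.elasticsearch.repositories.IndexId;
    import org.elasticsearch.repositories.Repository;
    import org.elasticsearch.snapshots.SnapshotId;

    // Restore shard 0 of [original_index] (as recorded in the snapshot) into a store
    // whose local shard id may be, e.g., [restored_index][0] after a rename on restore.
    static void restoreRenamed(Repository repository, Store store, SnapshotId snapshotId,
                               IndexId indexId, RecoveryState recoveryState) {
        ShardId snapshotShardId = new ShardId(new Index("original_index", "some-index-uuid"), 0);
        repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, snapshotShardId, recoveryState);
    }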
/**
* Retrieve shard snapshot status for the stored snapshot
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 320b7ff2d5550..86409ebac7d31 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -71,7 +71,7 @@
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.Streams;
-import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
import org.elasticsearch.index.snapshots.IndexShardSnapshotException;
@@ -793,8 +793,8 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef, b
}
@Override
- public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
+ public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
+ IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
SnapshotContext snapshotContext = new SnapshotContext(store, snapshotId, indexId, snapshotStatus, System.currentTimeMillis());
try {
snapshotContext.snapshot(snapshotIndexCommit);
@@ -809,18 +809,19 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId,
}
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
- RecoveryState recoveryState) {
- final Context context = new Context(snapshotId, indexId, shard.shardId(), snapshotShardId);
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
+ ShardId shardId = store.shardId();
+ final Context context = new Context(snapshotId, indexId, shardId, snapshotShardId);
BlobPath path = basePath().add("indices").add(indexId.getId()).add(Integer.toString(snapshotShardId.getId()));
BlobContainer blobContainer = blobStore().blobContainer(path);
- final RestoreContext snapshotContext = new RestoreContext(shard, snapshotId, recoveryState, blobContainer);
+ final RestoreContext snapshotContext = new RestoreContext(shardId, snapshotId, recoveryState, blobContainer);
try {
BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot();
SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
- snapshotContext.restore(snapshotFiles);
+ snapshotContext.restore(snapshotFiles, store);
} catch (Exception e) {
- throw new IndexShardRestoreFailedException(shard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e);
+ throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e);
}
}
@@ -1366,13 +1367,13 @@ private class RestoreContext extends FileRestoreContext {
/**
* Constructs new restore context
- * @param indexShard shard to restore into
+ * @param shardId shard id to restore into
* @param snapshotId snapshot id
* @param recoveryState recovery state to report progress
* @param blobContainer the blob container to read the files from
*/
- RestoreContext(IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) {
- super(metadata.name(), indexShard, snapshotId, recoveryState, BUFFER_SIZE);
+ RestoreContext(ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) {
+ super(metadata.name(), shardId, snapshotId, recoveryState, BUFFER_SIZE);
this.blobContainer = blobContainer;
}
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
index 1e0ab2dd8beee..f78ddab9ee44c 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
@@ -31,7 +31,6 @@
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.iterable.Iterables;
-import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
@@ -64,7 +63,6 @@ public abstract class FileRestoreContext {
protected static final Logger logger = LogManager.getLogger(FileRestoreContext.class);
protected final String repositoryName;
- protected final IndexShard indexShard;
protected final RecoveryState recoveryState;
protected final SnapshotId snapshotId;
protected final ShardId shardId;
@@ -73,26 +71,24 @@ public abstract class FileRestoreContext {
/**
* Constructs new restore context
*
- * @param indexShard shard to restore into
+ * @param shardId shard id to restore into
* @param snapshotId snapshot id
* @param recoveryState recovery state to report progress
* @param bufferSize buffer size for restore
*/
- protected FileRestoreContext(String repositoryName, IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState,
+ protected FileRestoreContext(String repositoryName, ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState,
int bufferSize) {
this.repositoryName = repositoryName;
this.recoveryState = recoveryState;
- this.indexShard = indexShard;
this.snapshotId = snapshotId;
- this.shardId = indexShard.shardId();
+ this.shardId = shardId;
this.bufferSize = bufferSize;
}
/**
* Performs restore operation
*/
- public void restore(SnapshotFiles snapshotFiles) throws IOException {
- final Store store = indexShard.store();
+ public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException {
store.incRef();
try {
logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
@@ -108,7 +104,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
// version number and no checksum, even though the index itself is perfectly fine to restore, this
// empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty
// shard anyway, we just create the empty shard here and then exit.
- store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion);
+ store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion);
return;
}
@@ -117,7 +113,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
// this will throw an IOException if the store has no segments infos file. The
// store can still have existing files but they will be deleted just before being
// restored.
- recoveryTargetMetadata = indexShard.snapshotStoreMetadata();
+ recoveryTargetMetadata = store.getMetadata(null, true);
} catch (org.apache.lucene.index.IndexNotFoundException e) {
// happens when restore to an empty shard, not a big deal
logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
@@ -127,7 +123,6 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
shardId, snapshotId), e);
recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
}
-
final List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover = new ArrayList<>();
final Map<String, StoreFileMetaData> snapshotMetaData = new HashMap<>();
final Map<String, BlobStoreIndexShardSnapshot.FileInfo> fileInfos = new HashMap<>();
@@ -157,7 +152,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata);
for (StoreFileMetaData md : diff.identical) {
BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name());
- recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), true);
+ recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), true);
if (logger.isTraceEnabled()) {
logger.trace("[{}] [{}] not_recovering file [{}] from [{}], exists in local store and is same", shardId, snapshotId,
fileInfo.physicalName(), fileInfo.name());
@@ -167,7 +162,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
for (StoreFileMetaData md : concat(diff)) {
BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name());
filesToRecover.add(fileInfo);
- recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), false);
+ recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), false);
if (logger.isTraceEnabled()) {
logger.trace("[{}] [{}] recovering [{}] from [{}]", shardId, snapshotId,
fileInfo.physicalName(), fileInfo.name());
@@ -260,7 +255,7 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi
int length;
while ((length = stream.read(buffer)) > 0) {
indexOutput.writeBytes(buffer, 0, length);
- recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length);
+ recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length);
}
Store.verify(indexOutput);
indexOutput.close();
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index a0c5ea9392c67..f79b6da6ef626 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -367,8 +367,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina
try {
// we flush first to make sure we get the latest writes snapshotted
try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) {
- repository.snapshotShard(indexShard, indexShard.store(), snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(),
- snapshotStatus);
+ repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus);
if (logger.isDebugEnabled()) {
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy();
logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus);
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 04ef68852cc3f..1710154f72f94 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -2300,8 +2300,8 @@ public void testRestoreShard() throws IOException {
target.markAsRecovering("store", new RecoveryState(routing, localNode, null));
assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") {
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
- RecoveryState recoveryState) {
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
try {
cleanLuceneIndex(targetStore.directory());
for (String file : sourceStore.directory().listAll()) {
diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java
index 505c0628d6aba..ae703795ec622 100644
--- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java
@@ -33,7 +33,7 @@
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
@@ -200,14 +200,14 @@ public boolean isReadOnly() {
}
@Override
- public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
+ public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
+ IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
}
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
- RecoveryState recoveryState) {
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
}
diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java
new file mode 100644
index 0000000000000..ec8a444d84fae
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.fs;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.CodecReader;
+import org.apache.lucene.index.FilterMergePolicy;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOSupplier;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingHelper;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+
+public class FsRepositoryTests extends ESTestCase {
+
+ public void testSnapshotAndRestore() throws IOException, InterruptedException {
+ ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName());
+ try (Directory directory = newDirectory()) {
+ Path repo = createTempDir();
+ Settings settings = Settings.builder()
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
+ .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath())
+ .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths())
+ .put("location", repo)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).build();
+
+ int numDocs = indexDocs(directory);
+ RepositoryMetaData metaData = new RepositoryMetaData("test", "fs", settings);
+ FsRepository repository = new FsRepository(metaData, new Environment(settings, null), NamedXContentRegistry.EMPTY, threadPool);
+ repository.start();
+ final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build();
+ IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("myindex", indexSettings);
+ ShardId shardId = new ShardId(idxSettings.getIndex(), 1);
+ Store store = new Store(shardId, idxSettings, directory, new DummyShardLock(shardId));
+ SnapshotId snapshotId = new SnapshotId("test", "test");
+ IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID());
+
+ IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory());
+ runGeneric(threadPool, () -> {
+ IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing();
+ repository.snapshotShard(store, null, snapshotId, indexId, indexCommit,
+ snapshotStatus);
+ IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy();
+ assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount());
+ });
+ Lucene.cleanLuceneIndex(directory);
+ expectThrows(org.apache.lucene.index.IndexNotFoundException.class, () -> Lucene.readSegmentInfos(directory));
+ DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
+ ShardRouting routing = ShardRouting.newUnassigned(shardId, true, new RecoverySource.SnapshotRecoverySource("test",
+ new Snapshot("foo", snapshotId), Version.CURRENT, "myindex"),
+ new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, ""));
+ routing = ShardRoutingHelper.initialize(routing, localNode.getId(), 0);
+ RecoveryState state = new RecoveryState(routing, localNode, null);
+ runGeneric(threadPool, () ->
+ repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, state));
+ assertTrue(state.getIndex().recoveredBytes() > 0);
+ assertEquals(0, state.getIndex().reusedFileCount());
+ assertEquals(indexCommit.getFileNames().size(), state.getIndex().recoveredFileCount());
+ assertEquals(numDocs, Lucene.readSegmentInfos(directory).totalMaxDoc());
+ deleteRandomDoc(store.directory());
+ SnapshotId incSnapshotId = new SnapshotId("test1", "test1");
+ IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory());
+ Collection<String> commitFileNames = incIndexCommit.getFileNames();
+ runGeneric(threadPool, () -> {
+ IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing();
+ repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus);
+ IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy();
+ assertEquals(2, copy.getIncrementalFileCount());
+ assertEquals(commitFileNames.size(), copy.getTotalFileCount());
+ });
+
+ // roll back to the first snap and then incrementally restore
+ RecoveryState firstState = new RecoveryState(routing, localNode, null);
+ runGeneric(threadPool, () ->
+ repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, firstState));
+ assertEquals("should reuse everything except of .liv and .si",
+ commitFileNames.size()-2, firstState.getIndex().reusedFileCount());
+
+ RecoveryState secondState = new RecoveryState(routing, localNode, null);
+ runGeneric(threadPool, () ->
+ repository.restoreShard(store, incSnapshotId, Version.CURRENT, indexId, shardId, secondState));
+ assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size() - 2);
+ assertEquals(secondState.getIndex().recoveredFileCount(), 2);
+ List<RecoveryState.File> recoveredFiles =
+ secondState.getIndex().fileDetails().stream().filter(f -> f.reused() == false).collect(Collectors.toList());
+ Collections.sort(recoveredFiles, Comparator.comparing(RecoveryState.File::name));
+ assertTrue(recoveredFiles.get(0).name(), recoveredFiles.get(0).name().endsWith(".liv"));
+ assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_2"));
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException {
+ CountDownLatch latch = new CountDownLatch(1);
+ threadPool.generic().submit(() -> {
+ try {
+ runnable.run();
+ } finally {
+ latch.countDown();
+ }
+ });
+ latch.await();
+ }
+
+ private void deleteRandomDoc(Directory directory) throws IOException {
+ try (IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(),
+ new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()).setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) {
+ @Override
+ public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
+ return true;
+ }
+
+ }))) {
+ final int numDocs = writer.getDocStats().numDocs;
+ writer.deleteDocuments(new Term("id", "" + randomIntBetween(0, writer.getDocStats().numDocs-1)));
+ writer.commit();
+ assertEquals(writer.getDocStats().numDocs, numDocs-1);
+ }
+ }
+
+ private int indexDocs(Directory directory) throws IOException {
+ try (IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(),
+ new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()))) {
+ int docs = 1 + random().nextInt(100);
+ for (int i = 0; i < docs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+ }
+ writer.commit();
+ return docs;
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
index 6175a22760029..2a2176f1c100d 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
@@ -797,7 +797,7 @@ protected void flushShard(IndexShard shard, boolean force) {
/** Recover a shard from a snapshot using a given repository **/
protected void recoverShardFromSnapshot(final IndexShard shard,
final Snapshot snapshot,
- final Repository repository) throws IOException {
+ final Repository repository) {
final Version version = Version.CURRENT;
final ShardId shardId = shard.shardId();
final String index = shardId.getIndexName();
@@ -806,9 +806,12 @@ protected void recoverShardFromSnapshot(final IndexShard shard,
final RecoverySource.SnapshotRecoverySource recoverySource =
new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, version, index);
final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource);
-
shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null));
- repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState());
+ repository.restoreShard(shard.store(),
+ snapshot.getSnapshotId(), version,
+ indexId,
+ shard.shardId(),
+ shard.recoveryState());
}
/** Snapshot a shard using a given repository **/
@@ -820,8 +823,8 @@ protected void snapshotShard(final IndexShard shard,
Index index = shard.shardId().getIndex();
IndexId indexId = new IndexId(index.getName(), index.getUUID());
- repository.snapshotShard(shard, shard.store(), snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(),
- snapshotStatus);
+ repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId,
+ indexCommitRef.getIndexCommit(), snapshotStatus);
}
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy();
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java
index bc60b4c194622..2279b48c3c023 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java
@@ -26,6 +26,7 @@
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.repositories.IndexId;
@@ -133,8 +134,8 @@ public boolean isReadOnly() {
}
@Override
- public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
+ public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
+ IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
}
@Override
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java
index 5a0472339c192..3010f90b803e9 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java
@@ -42,10 +42,10 @@
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.seqno.LocalCheckpointTracker;
import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException;
import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException;
-import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardRecoveryException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
@@ -294,18 +294,19 @@ public boolean isReadOnly() {
}
@Override
- public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
+ public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
+ IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
- public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId,
- RecoveryState recoveryState) {
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
// TODO: Add timeouts to network calls / the restore process.
- createEmptyStore(indexShard, shardId);
+ createEmptyStore(store);
+ ShardId shardId = store.shardId();
- final Map<String, String> ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
+ final Map<String, String> ccrMetaData = store.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
final String leaderIndexName = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY);
final String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY);
final Index leaderIndex = new Index(leaderIndexName, leaderUUID);
@@ -314,14 +315,14 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v
final Client remoteClient = getRemoteClusterClient();
final String retentionLeaseId =
- retentionLeaseId(localClusterName, indexShard.shardId().getIndex(), remoteClusterAlias, leaderIndex);
+ retentionLeaseId(localClusterName, shardId.getIndex(), remoteClusterAlias, leaderIndex);
acquireRetentionLeaseOnLeader(shardId, retentionLeaseId, leaderShardId, remoteClient);
// schedule renewals to run during the restore
final Scheduler.Cancellable renewable = threadPool.scheduleWithFixedDelay(
() -> {
- logger.trace("{} background renewal of retention lease [{}] during restore", indexShard.shardId(), retentionLeaseId);
+ logger.trace("{} background renewal of retention lease [{}] during restore", shardId, retentionLeaseId);
final ThreadContext threadContext = threadPool.getThreadContext();
try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
// we have to execute under the system context so that if security is enabled the renewal is authorized
@@ -336,36 +337,34 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v
e -> {
assert e instanceof ElasticsearchSecurityException == false : e;
logger.warn(new ParameterizedMessage(
- "{} background renewal of retention lease [{}] failed during restore",
- indexShard.shardId(),
- retentionLeaseId),
- e);
+ "{} background renewal of retention lease [{}] failed during restore", shardId,
+ retentionLeaseId), e);
}));
}
},
- CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(indexShard.indexSettings().getNodeSettings()),
+ CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(store.indexSettings().getNodeSettings()),
Ccr.CCR_THREAD_POOL_NAME);
// TODO: There should be some local timeout. And if the remote cluster returns an unknown session
// response, we should be able to retry by creating a new session.
- try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, indexShard, recoveryState)) {
- restoreSession.restoreFiles();
- updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index());
+ try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, shardId, recoveryState)) {
+ restoreSession.restoreFiles(store);
+ updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, shardId.getIndex());
} catch (Exception e) {
- throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e);
+ throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e);
} finally {
- logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId);
+ logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId,
+ retentionLeaseId);
renewable.cancel();
}
}
- private void createEmptyStore(final IndexShard indexShard, final ShardId shardId) {
- final Store store = indexShard.store();
+ private void createEmptyStore(Store store) {
store.incRef();
try {
- store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion);
+ store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion);
} catch (final EngineException | IOException e) {
- throw new IndexShardRecoveryException(shardId, "failed to create empty store", e);
+ throw new IndexShardRecoveryException(store.shardId(), "failed to create empty store", e);
} finally {
store.decRef();
}
@@ -432,12 +431,12 @@ private void updateMappings(Client leaderClient, Index leaderIndex, long leaderM
}
}
- RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard,
+ RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, ShardId indexShardId,
RecoveryState recoveryState) {
String sessionUUID = UUIDs.randomBase64UUID();
PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE,
new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(ccrSettings.getRecoveryActionTimeout());
- return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShard, recoveryState,
+ return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShardId, recoveryState,
response.getStoreFileMetaData(), response.getMappingVersion(), threadPool, ccrSettings, throttledTime::inc);
}
@@ -452,10 +451,10 @@ private static class RestoreSession extends FileRestoreContext implements Closea
private final LongConsumer throttleListener;
private final ThreadPool threadPool;
- RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, IndexShard indexShard,
+ RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, ShardId shardId,
RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, long mappingVersion,
ThreadPool threadPool, CcrSettings ccrSettings, LongConsumer throttleListener) {
- super(repositoryName, indexShard, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes()));
+ super(repositoryName, shardId, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes()));
this.remoteClient = remoteClient;
this.sessionUUID = sessionUUID;
this.node = node;
@@ -466,14 +465,14 @@ private static class RestoreSession extends FileRestoreContext implements Closea
this.throttleListener = throttleListener;
}
- void restoreFiles() throws IOException {
+ void restoreFiles(Store store) throws IOException {
ArrayList<FileInfo> fileInfos = new ArrayList<>();
for (StoreFileMetaData fileMetaData : sourceMetaData) {
ByteSizeValue fileSize = new ByteSizeValue(fileMetaData.length());
fileInfos.add(new FileInfo(fileMetaData.name(), fileMetaData, fileSize));
}
SnapshotFiles snapshotFiles = new SnapshotFiles(LATEST, fileInfos);
- restore(snapshotFiles);
+ restore(snapshotFiles, store);
}
@Override
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java
index c5a357c7df817..abef313d0b017 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java
@@ -447,8 +447,8 @@ protected synchronized void recoverPrimary(IndexShard primary) {
primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null));
primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) {
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version,
- IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
try {
IndexShard leader = leaderGroup.getPrimary();
Lucene.cleanLuceneIndex(primary.store().directory());
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java
index 947ce78da2ca3..f8260f2fce57c 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java
@@ -127,8 +127,8 @@ public void testRestoreShard() throws IOException {
target.markAsRecovering("store", new RecoveryState(routing, localNode, null));
assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") {
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
- RecoveryState recoveryState) {
+ public void restoreShard(Store store, SnapshotId snapshotId,
+ Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
try {
cleanLuceneIndex(targetStore.directory());
for (String file : sourceStore.directory().listAll()) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java
index d7f70cf8ef2e1..bb5819e1bda43 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java
@@ -10,7 +10,9 @@
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
@@ -24,8 +26,7 @@
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.ReadOnlyEngine;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogStats;
@@ -104,15 +105,18 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met
}
@Override
- public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
- if (shard.mapperService().documentMapper() != null // if there is no mapping this is null
- && shard.mapperService().documentMapper().sourceMapper().isComplete() == false) {
+ public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
+ IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
+ if (mapperService.documentMapper() != null // if there is no mapping this is null
+ && mapperService.documentMapper().sourceMapper().isComplete() == false) {
throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " +
"or filters the source");
}
- ShardPath shardPath = shard.shardPath();
- Path dataPath = shardPath.getDataPath();
+ Directory unwrap = FilterDirectory.unwrap(store.directory());
+ if (unwrap instanceof FSDirectory == false) {
+ throw new AssertionError("expected FSDirectory but got " + unwrap.toString());
+ }
+ Path dataPath = ((FSDirectory) unwrap).getDirectory().getParent();
// TODO should we have a snapshot tmp directory per shard that is maintained by the system?
Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME);
try (FSDirectory directory = new SimpleFSDirectory(snapPath)) {
@@ -122,7 +126,7 @@ protected void closeInternal() {
// do nothing;
}
}, Store.OnClose.EMPTY);
- Supplier<Query> querySupplier = shard.mapperService().hasNested() ? Queries::newNestedFilter : null;
+ Supplier<Query> querySupplier = mapperService.hasNested() ? Queries::newNestedFilter : null;
// SourceOnlySnapshot will take care of soft- and hard-deletes no special casing needed here
SourceOnlySnapshot snapshot = new SourceOnlySnapshot(tempStore.directory(), querySupplier);
snapshot.syncSnapshot(snapshotIndexCommit);
@@ -133,7 +137,7 @@ protected void closeInternal() {
store.incRef();
try (DirectoryReader reader = DirectoryReader.open(tempStore.directory())) {
IndexCommit indexCommit = reader.getIndexCommit();
- super.snapshotShard(shard, tempStore, snapshotId, indexId, indexCommit, snapshotStatus);
+ super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus);
} finally {
store.decRef();
}
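The directory unwrapping above relies on a general Lucene pattern; a small standalone sketch (assumed helper name, not in the patch) of the same idea:

    import java.nio.file.Path;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.FilterDirectory;

    // FilterDirectory.unwrap walks nested FilterDirectory wrappers down to the
    // innermost delegate; if that delegate is filesystem-backed, its on-disk
    // location can be recovered (the patch takes the parent of the Lucene index
    // directory as the shard data path).
    static Path shardDataPath(Directory dir) {
        Directory unwrapped = FilterDirectory.unwrap(dir);
        if (unwrapped instanceof FSDirectory == false) {
            throw new AssertionError("expected FSDirectory but got " + unwrapped);
        }
        return ((FSDirectory) unwrapped).getDirectory().getParent();
    }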
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java
index 6a37e8265c096..948503b33478c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java
@@ -98,7 +98,7 @@ public void testSourceIncomplete() throws IOException {
IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing();
IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () ->
runAsSnapshot(shard.getThreadPool(),
- () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId,
+ () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId,
snapshotRef.getIndexCommit(), indexShardSnapshotStatus)));
assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source"
, illegalStateException.getMessage());
@@ -120,8 +120,8 @@ public void testIncrementalSnapshot() throws IOException {
try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) {
IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing();
SnapshotId snapshotId = new SnapshotId("test", "test");
- runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef
- .getIndexCommit(), indexShardSnapshotStatus));
+ runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId,
+ snapshotRef.getIndexCommit(), indexShardSnapshotStatus));
IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy();
assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount());
totalFileCount = copy.getTotalFileCount();
@@ -134,8 +134,8 @@ public void testIncrementalSnapshot() throws IOException {
SnapshotId snapshotId = new SnapshotId("test_1", "test_1");
IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing();
- runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef
- .getIndexCommit(), indexShardSnapshotStatus));
+ runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId,
+ snapshotRef.getIndexCommit(), indexShardSnapshotStatus));
IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy();
// we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt
assertEquals(5, copy.getIncrementalFileCount());
@@ -148,8 +148,8 @@ public void testIncrementalSnapshot() throws IOException {
SnapshotId snapshotId = new SnapshotId("test_2", "test_2");
IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing();
- runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef
- .getIndexCommit(), indexShardSnapshotStatus));
+ runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId,
+ snapshotRef.getIndexCommit(), indexShardSnapshotStatus));
IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy();
// we processed the segments_N file plus _1_1.liv
assertEquals(2, copy.getIncrementalFileCount());
@@ -197,7 +197,8 @@ public void testRestoreMinmal() throws IOException {
repository.initializeSnapshot(snapshotId, Arrays.asList(indexId),
MetaData.builder().put(shard.indexSettings()
.getIndexMetaData(), false).build());
- repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus);
+ repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(),
+ indexShardSnapshotStatus);
});
IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy();
assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount());
From 8918dd1f8641f04b16433b7e1fa035bf713b2a26 Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Wed, 22 May 2019 13:20:18 +0300
Subject: [PATCH 033/224] Fail early when rp.client_secret is missing in OIDC
realm (#42256)
rp.client_secret is a required secure setting. Make sure we fail with
a SettingsException and a clear, actionable message when building
the realm if the setting is missing.
---
.../authc/oidc/OpenIdConnectRealm.java | 4 +++
.../authc/SecurityRealmSettingsTests.java | 8 ++++-
.../oidc/OpenIdConnectRealmSettingsTests.java | 36 +++++++++++++++++++
.../authc/oidc/OpenIdConnectRealmTests.java | 18 +++++++---
.../authc/oidc/OpenIdConnectTestCase.java | 11 +++++-
5 files changed, 70 insertions(+), 7 deletions(-)
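For context on the test changes below, a minimal sketch (realm name and secret value are arbitrary; the setting keys match those used in this patch) of supplying the now-required secure setting so realm construction does not fail:

    import org.elasticsearch.common.settings.MockSecureSettings;
    import org.elasticsearch.common.settings.Settings;

    // Without rp.client_secret present in the secure settings, building the realm
    // now throws a SettingsException naming the missing key.
    static Settings oidcRealmSettings() {
        MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString("xpack.security.authc.realms.oidc.oidc1.rp.client_secret", "a-long-secret");
        return Settings.builder()
            .put("xpack.security.authc.realms.oidc.oidc1.rp.client_id", "my_client")
            .setSecureSettings(secureSettings)
            .build();
    }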
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
index 5f876a677d689..ac933dcfef878 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
@@ -247,6 +247,10 @@ private RelyingPartyConfiguration buildRelyingPartyConfiguration(RealmConfig con
}
final ClientID clientId = new ClientID(require(config, RP_CLIENT_ID));
final SecureString clientSecret = config.getSetting(RP_CLIENT_SECRET);
+ if (clientSecret.length() == 0) {
+ throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, RP_CLIENT_SECRET)
+ + "] is required");
+ }
final ResponseType responseType;
try {
// This should never happen as it's already validated in the settings
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
index bccee36631e3d..b9a557320e3e1 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
@@ -8,6 +8,7 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.SecurityIntegTestCase;
import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction;
@@ -52,8 +53,12 @@ protected Settings nodeSettings(int nodeOrdinal) {
final Path jwkSet = createTempFile("jwkset", "json");
OpenIdConnectTestCase.writeJwkSetToFile(jwkSet);
+ final Settings existingSettings = super.nodeSettings(nodeOrdinal);
+ MockSecureSettings mockSecureSettings =
+ (MockSecureSettings) Settings.builder().put(existingSettings).getSecureSettings();
+ mockSecureSettings.setString("xpack.security.authc.realms.oidc.oidc1.rp.client_secret", randomAlphaOfLength(12));
settings = Settings.builder()
- .put(super.nodeSettings(nodeOrdinal).filter(s -> s.startsWith("xpack.security.authc.realms.") == false))
+ .put(existingSettings.filter(s -> s.startsWith("xpack.security.authc.realms.") == false), false)
.put("xpack.security.authc.token.enabled", true)
.put("xpack.security.authc.realms.file.file1.order", 1)
.put("xpack.security.authc.realms.native.native1.order", 2)
@@ -80,6 +85,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
.put("xpack.security.authc.realms.oidc.oidc1.rp.client_id", "my_client")
.put("xpack.security.authc.realms.oidc.oidc1.rp.response_type", "code")
.put("xpack.security.authc.realms.oidc.oidc1.claims.principal", "sub")
+ .setSecureSettings(mockSecureSettings)
.build();
} catch (IOException e) {
throw new RuntimeException(e);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
index 8dbf27070c492..341cf07b0dd7b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.security.authc.oidc;
+import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -42,6 +43,7 @@ public void testIncorrectResponseTypeThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "hybrid");
+ settingsBuilder.setSecureSettings(getSecureSettings());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -58,6 +60,7 @@ public void testMissingAuthorizationEndpointThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -75,6 +78,7 @@ public void testInvalidAuthorizationEndpointThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -91,6 +95,7 @@ public void testMissingTokenEndpointThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -108,6 +113,7 @@ public void testInvalidTokenEndpointThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -123,6 +129,7 @@ public void testMissingJwksUrlThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -139,6 +146,7 @@ public void testMissingIssuerThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -155,6 +163,7 @@ public void testMissingRedirectUriThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -171,6 +180,7 @@ public void testMissingClientIdThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -189,6 +199,7 @@ public void testMissingPrincipalClaimThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
.putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
Arrays.asList("openid", "scope1", "scope2"));
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -209,6 +220,7 @@ public void testPatternWithoutSettingThrowsError() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
.putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
Arrays.asList("openid", "scope1", "scope2"));
+ settingsBuilder.setSecureSettings(getSecureSettings());
SettingsException exception = expectThrows(SettingsException.class, () -> {
new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
});
@@ -218,6 +230,30 @@ public void testPatternWithoutSettingThrowsError() {
Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern())));
}
+ public void testMissingClientSecretThrowsError() {
+ final Settings.Builder settingsBuilder = Settings.builder()
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ SettingsException exception = expectThrows(SettingsException.class, () -> {
+ new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+ });
+ assertThat(exception.getMessage(),
+ Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET)));
+ }
+
+ private MockSecureSettings getSecureSettings() {
+ MockSecureSettings secureSettings = new MockSecureSettings();
+ secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET),
+ randomAlphaOfLengthBetween(12, 18));
+ return secureSettings;
+ }
+
private RealmConfig buildConfig(Settings realmSettings) {
final Settings settings = Settings.builder()
.put("path.home", createTempDir())
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
index 151a7e1caea19..162b88224414e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
@@ -165,7 +165,8 @@ public void testBuildRelyingPartyConfigWithoutOpenIdScope() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
.putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
- Arrays.asList("scope1", "scope2"));
+ Arrays.asList("scope1", "scope2"))
+ .setSecureSettings(getSecureSettings());
final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
null);
final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
@@ -187,7 +188,8 @@ public void testBuildingAuthenticationRequest() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
.putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
- Arrays.asList("openid", "scope1", "scope2"));
+ Arrays.asList("openid", "scope1", "scope2"))
+ .setSecureSettings(getSecureSettings());
final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
null);
final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
@@ -207,7 +209,9 @@ public void testBuilidingAuthenticationRequestWithDefaultScope() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
- .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+ .setSecureSettings(getSecureSettings());
final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
null);
final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
@@ -237,7 +241,9 @@ public void testBuildingAuthenticationRequestWithExistingStateAndNonce() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
- .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+ .setSecureSettings(getSecureSettings());
final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
null);
final String state = new State().getValue();
@@ -257,7 +263,9 @@ public void testBuildingAuthenticationRequestWithLoginHint() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
- .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+ .setSecureSettings(getSecureSettings());
final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
null);
final String state = new State().getValue();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
index 9c1c4e981109a..63071a3d1cb40 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
@@ -12,6 +12,7 @@
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;
import com.nimbusds.openid.connect.sdk.Nonce;
+import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
@@ -50,7 +51,15 @@ protected static Settings.Builder getBasicRealmSettings() {
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.GROUPS_CLAIM.getClaim()), "groups")
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.MAIL_CLAIM.getClaim()), "mail")
- .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name");
+ .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name")
+ .setSecureSettings(getSecureSettings());
+ }
+
+ protected static MockSecureSettings getSecureSettings() {
+ MockSecureSettings secureSettings = new MockSecureSettings();
+ secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET),
+ randomAlphaOfLengthBetween(12, 18));
+ return secureSettings;
}
protected JWT generateIdToken(String subject, String audience, String issuer) throws Exception {
From 3b67d87bf6d6b23694fadbcb8ab8b0d83ac3905d Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 12:25:48 +0200
Subject: [PATCH 034/224] Avoid bubbling up failures from a shard that is
recovering (#42287)
A shard that is undergoing peer recovery is subject to logging warnings of the form
org.elasticsearch.action.FailedNodeException: Failed node [XYZ]
...
Caused by: org.apache.lucene.index.IndexNotFoundException: no segments* file found in ...
These failures are actually harmless, and expected to happen while a peer recovery is ongoing (i.e.
there is an IndexShard instance, but no proper IndexCommit just yet).
As these failures are currently bubbled up to the master, they cause unnecessary reroutes and
confusion amongst users due to being logged as warnings.
Closes #40107
---
.../TransportNodesListShardStoreMetaData.java | 14 +++++-
.../indices/recovery/IndexRecoveryIT.java | 44 +++++++++++++++++++
2 files changed, 56 insertions(+), 2 deletions(-)
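The pattern in the change below is worth noting: snapshotting the store metadata can throw IndexNotFoundException while a peer recovery has produced an IndexShard but no segments file yet, so the handler catches it and answers with Store.MetadataSnapshot.EMPTY instead of letting the exception reach the master. A self-contained sketch of the same defensive idea, using a hypothetical StoreLister rather than the real Elasticsearch classes:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Stream;

    // Hypothetical stand-in for listing a shard's on-disk files: a missing
    // directory is an expected, transient state while files are still being
    // copied, so it is answered with an empty listing rather than a failure
    // that the caller would escalate.
    final class StoreLister {
        static List<String> listFiles(Path shardDir) {
            try (Stream<Path> stream = Files.list(shardDir)) {
                return stream.map(p -> p.getFileName().toString()).toList();
            } catch (NoSuchFileException e) {
                return List.of(); // expected while a recovery is ongoing
            } catch (IOException e) {
                throw new UncheckedIOException(e); // anything else is a real failure
            }
        }
    }

Unlike this sketch, the actual change also downgrades a plain IOException to an empty response, logging it as a warning rather than rethrowing.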
diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
index bc041b4b322ae..20307af32f4ed 100644
--- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
+++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.store;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -123,8 +124,17 @@ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException
if (indexService != null) {
IndexShard indexShard = indexService.getShardOrNull(shardId.id());
if (indexShard != null) {
- exists = true;
- return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata());
+ try {
+ final StoreFilesMetaData storeFilesMetaData = new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata());
+ exists = true;
+ return storeFilesMetaData;
+ } catch (org.apache.lucene.index.IndexNotFoundException e) {
+ logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e);
+ return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY);
+ } catch (IOException e) {
+ logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e);
+ return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY);
+ }
}
}
// try and see if we an list unallocated
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 0ea8eb8e9b447..4710c59647c25 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -923,6 +923,50 @@ public void testDoNotInfinitelyWaitForMapping() {
assertHitCount(client().prepareSearch().get(), numDocs);
}
+ /** Makes sure the new master does not repeatedly fetch index metadata from recovering replicas */
+ public void testOngoingRecoveryAndMasterFailOver() throws Exception {
+ String indexName = "test";
+ internalCluster().startNodes(2);
+ String nodeWithPrimary = internalCluster().startDataOnlyNode();
+ assertAcked(client().admin().indices().prepareCreate(indexName)
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.routing.allocation.include._name", nodeWithPrimary)));
+ MockTransportService transport = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary);
+ CountDownLatch phase1ReadyBlocked = new CountDownLatch(1);
+ CountDownLatch allowToCompletePhase1Latch = new CountDownLatch(1);
+ Semaphore blockRecovery = new Semaphore(1);
+ transport.addSendBehavior((connection, requestId, action, request, options) -> {
+ if (PeerRecoveryTargetService.Actions.CLEAN_FILES.equals(action) && blockRecovery.tryAcquire()) {
+ phase1ReadyBlocked.countDown();
+ try {
+ allowToCompletePhase1Latch.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+ connection.sendRequest(requestId, action, request, options);
+ });
+ try {
+ String nodeWithReplica = internalCluster().startDataOnlyNode();
+ assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("index.routing.allocation.include._name", nodeWithPrimary + "," + nodeWithReplica)));
+ phase1ReadyBlocked.await();
+ internalCluster().restartNode(clusterService().state().nodes().getMasterNode().getName(),
+ new InternalTestCluster.RestartCallback());
+ internalCluster().ensureAtLeastNumDataNodes(3);
+ assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
+ .putNull("index.routing.allocation.include._name")));
+ assertFalse(client().admin().cluster().prepareHealth(indexName).setWaitForActiveShards(2).get().isTimedOut());
+ } finally {
+ allowToCompletePhase1Latch.countDown();
+ }
+ ensureGreen(indexName);
+ }
+
public void testRecoveryFlushReplica() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(3);
String indexName = "test-index";
From 7ab59eef11f5f966ae3cad385237a4f8b7ad115f Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 22 May 2019 12:55:47 +0200
Subject: [PATCH 035/224] Some Cleanup in o.e.i.engine (#42278)
* Some Cleanup in o.e.i.engine
* Remove dead code and parameters
* Reduce visibility in some obvious spots
* Add missing `assert`s (not that important here since the methods
themselves will probably be dead-code eliminated, but still)
---
.../elasticsearch/index/engine/Engine.java | 10 +--
.../index/engine/InternalEngine.java | 35 +++++-----
.../index/engine/LiveVersionMap.java | 2 +-
.../index/engine/LuceneChangesSnapshot.java | 3 +-
.../index/engine/ReadOnlyEngine.java | 4 +-
.../index/engine/RecoveryCounter.java | 65 -------------------
.../RecoverySourcePruneMergePolicy.java | 3 +-
.../elasticsearch/index/engine/Segment.java | 18 ++---
.../index/engine/SegmentsStats.java | 25 ++++---
.../engine/SnapshotFailedEngineException.java | 7 +-
.../index/engine/TranslogLeafReader.java | 5 +-
.../VersionConflictEngineException.java | 6 +-
.../index/engine/FrozenEngine.java | 2 +-
13 files changed, 47 insertions(+), 138 deletions(-)
delete mode 100644 server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java
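The "missing `assert`s" bullet refers to guards such as `assert assertNonPrimaryOrigin(index)` in the hunks below: the check methods return boolean precisely so they can be called inside an assert statement and be skipped entirely when assertions are disabled. A self-contained sketch of the idiom, with hypothetical names:

    final class SeqNoChecks {

        // Invariant checks return boolean so they can live inside an `assert`;
        // the assert inside pinpoints the exact failing condition when enabled.
        static boolean assertNonNegative(long seqNo) {
            assert seqNo >= 0 : "sequence number should be assigned; got " + seqNo;
            return true;
        }

        static void apply(long seqNo) {
            // Bare call (pre-patch style): runs even with assertions disabled.
            //     assertNonNegative(seqNo);
            // Guarded call (post-patch style): elided entirely under -da.
            assert assertNonNegative(seqNo);
            // ... real work ...
        }
    }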
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index 63659126f8438..2d210b716d4b7 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -911,7 +911,7 @@ private ImmutableOpenMap<String, Long> getSegmentFileSizes(SegmentReader segment
map.put(extension, length);
}
- if (useCompoundFile && directory != null) {
+ if (useCompoundFile) {
try {
directory.close();
} catch (IOException e) {
@@ -954,8 +954,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole
// now, correlate or add the committed ones...
if (lastCommittedSegmentInfos != null) {
- SegmentInfos infos = lastCommittedSegmentInfos;
- for (SegmentCommitInfo info : infos) {
+ for (SegmentCommitInfo info : lastCommittedSegmentInfos) {
Segment segment = segments.get(info.info.name);
if (segment == null) {
segment = new Segment(info.info.name);
@@ -1783,11 +1782,8 @@ public boolean equals(Object o) {
CommitId commitId = (CommitId) o;
- if (!Arrays.equals(id, commitId.id)) {
- return false;
- }
+ return Arrays.equals(id, commitId.id);
- return true;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 24d1078510c0b..9fb63d0de019d 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -560,7 +560,7 @@ private String loadTranslogUUIDFromLastCommit() throws IOException {
/**
* Reads the current stored history ID from the IW commit data.
*/
- private String loadHistoryUUID(final IndexWriter writer) throws IOException {
+ private String loadHistoryUUID(final IndexWriter writer) {
final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY);
if (uuid == null) {
throw new IllegalStateException("commit doesn't contain history uuid");
@@ -632,9 +632,8 @@ public GetResult get(Get get, BiFunction<String, SearcherScope, Searcher> search
if (operation != null) {
// in the case of a already pruned translog generation we might get null here - yet very unlikely
final Translog.Index index = (Translog.Index) operation;
- TranslogLeafReader reader = new TranslogLeafReader(index, engineConfig
- .getIndexSettings().getIndexVersionCreated());
- return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close),
+ TranslogLeafReader reader = new TranslogLeafReader(index);
+ return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader),
new VersionsAndSeqNoResolver.DocIdAndVersion(0, index.version(), index.seqNo(), index.primaryTerm(),
reader, 0));
}
@@ -753,7 +752,7 @@ private boolean canOptimizeAddDocument(Index index) {
+ index.getAutoGeneratedIdTimestamp();
switch (index.origin()) {
case PRIMARY:
- assertPrimaryCanOptimizeAddDocument(index);
+ assert assertPrimaryCanOptimizeAddDocument(index);
return true;
case PEER_RECOVERY:
case REPLICA:
@@ -779,7 +778,7 @@ protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) {
private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
if (origin == Operation.Origin.PRIMARY) {
- assertPrimaryIncomingSequenceNumber(origin, seqNo);
+ assert assertPrimaryIncomingSequenceNumber(origin, seqNo);
} else {
// sequence number should be set when operation origin is not primary
assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin;
@@ -920,7 +919,7 @@ public IndexResult index(Index index) throws IOException {
}
protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException {
- assertNonPrimaryOrigin(index);
+ assert assertNonPrimaryOrigin(index);
final IndexingStrategy plan;
final boolean appendOnlyRequest = canOptimizeAddDocument(index);
if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) {
@@ -975,13 +974,13 @@ protected IndexingStrategy indexingStrategyForOperation(final Index index) throw
}
}
- protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
+ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin();
final IndexingStrategy plan;
// resolve an external operation into an internal one which is safe to replay
if (canOptimizeAddDocument(index)) {
if (mayHaveBeenIndexedBefore(index)) {
- plan = IndexingStrategy.overrideExistingAsIfNotThere(1L);
+ plan = IndexingStrategy.overrideExistingAsIfNotThere();
versionMap.enforceSafeAccess();
} else {
plan = IndexingStrategy.optimizedAppendOnly(1L);
@@ -1003,7 +1002,7 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc
if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(),
index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
- plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm());
+ plan = IndexingStrategy.skipDueToVersionConflict(e, true, currentVersion, getPrimaryTerm());
} else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm()
)) {
@@ -1161,9 +1160,9 @@ static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted,
true, false, versionForIndexing, null);
}
- static IndexingStrategy overrideExistingAsIfNotThere(long versionForIndexing) {
+ static IndexingStrategy overrideExistingAsIfNotThere() {
return new IndexingStrategy(true, true, true,
- false, versionForIndexing, null);
+ false, 1L, null);
}
public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) {
@@ -1282,7 +1281,7 @@ protected DeletionStrategy deletionStrategyForOperation(final Delete delete) thr
}
protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException {
- assertNonPrimaryOrigin(delete);
+ assert assertNonPrimaryOrigin(delete);
maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr));
assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" +
"max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]";
@@ -1302,7 +1301,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws
} else {
final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
- plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.version());
+ plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version());
} else {
plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version());
}
@@ -1315,7 +1314,7 @@ protected boolean assertNonPrimaryOrigin(final Operation operation) {
return true;
}
- protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
+ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin();
// resolve operation from external to internal
final VersionValue versionValue = resolveDocVersion(delete, delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO);
@@ -1333,7 +1332,7 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE
if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(),
delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
- plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted);
+ plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), true);
} else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm()
)) {
@@ -1425,8 +1424,8 @@ public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, lo
return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, null);
}
- static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long versionOfDeletion) {
- return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, versionOfDeletion, null);
+ static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) {
+ return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null);
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
index e4dce8919cf1e..ce955903af494 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
@@ -234,7 +234,7 @@ long getMinDeleteTimestamp() {
/**
* Tracks bytes used by tombstones (deletes)
*/
- final AtomicLong ramBytesUsedTombstones = new AtomicLong();
+ private final AtomicLong ramBytesUsedTombstones = new AtomicLong();
@Override
public void beforeRefresh() throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
index c9550a61f9e58..a3e86ab1606df 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
@@ -188,8 +188,7 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray
int readerIndex = 0;
CombinedDocValues combinedDocValues = null;
LeafReaderContext leaf = null;
- for (int i = 0; i < scoreDocs.length; i++) {
- ScoreDoc scoreDoc = scoreDocs[i];
+ for (ScoreDoc scoreDoc : scoreDocs) {
if (scoreDoc.doc >= docBase + maxDoc) {
do {
leaf = leaves.get(readerIndex++);
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index e7e0c4d927851..9d5f6054243e4 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -457,8 +457,8 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) {
}
- protected void processReaders(IndexReader reader, IndexReader previousReader) {
- searcherFactory.processReaders(reader, previousReader);
+ protected void processReader(IndexReader reader) {
+ searcherFactory.processReaders(reader, null);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java b/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java
deleted file mode 100644
index 31fddbedfb715..0000000000000
--- a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.engine;
-
-import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.index.store.Store;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * RecoveryCounter keeps tracks of the number of ongoing recoveries for a
- * particular {@link Store}
- */
-public class RecoveryCounter implements Releasable {
-
- private final Store store;
-
- RecoveryCounter(Store store) {
- this.store = store;
- }
-
- private final AtomicInteger onGoingRecoveries = new AtomicInteger();
-
- void startRecovery() {
- store.incRef();
- onGoingRecoveries.incrementAndGet();
- }
-
- public int get() {
- return onGoingRecoveries.get();
- }
-
- /**
- * End the recovery counter by decrementing the store's ref and the ongoing recovery counter
- * @return number of ongoing recoveries remaining
- */
- int endRecovery() {
- store.decRef();
- int left = onGoingRecoveries.decrementAndGet();
- assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get();
- return left;
- }
-
- @Override
- public void close() {
- endRecovery();
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
index 42276f4ca2108..a4221bf01f210 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
@@ -58,8 +58,7 @@ public CodecReader wrapForMerge(CodecReader reader) throws IOException {
});
}
- // pkg private for testing
- static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier)
+ private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier)
throws IOException {
NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField);
if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java
index 945359eda1b17..b1e6d09d897f2 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java
@@ -39,6 +39,7 @@
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
public class Segment implements Streamable {
@@ -93,10 +94,6 @@ public ByteSizeValue getSize() {
return new ByteSizeValue(sizeInBytes);
}
- public long getSizeInBytes() {
- return this.sizeInBytes;
- }
-
public org.apache.lucene.util.Version getVersion() {
return version;
}
@@ -144,9 +141,8 @@ public boolean equals(Object o) {
Segment segment = (Segment) o;
- if (name != null ? !name.equals(segment.name) : segment.name != null) return false;
+ return Objects.equals(name, segment.name);
- return true;
}
@Override
@@ -211,7 +207,7 @@ public void writeTo(StreamOutput out) throws IOException {
}
}
- Sort readSegmentSort(StreamInput in) throws IOException {
+ private Sort readSegmentSort(StreamInput in) throws IOException {
int size = in.readVInt();
if (size == 0) {
return null;
@@ -262,7 +258,7 @@ Sort readSegmentSort(StreamInput in) throws IOException {
return new Sort(fields);
}
- void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
+ private void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
if (sort == null) {
out.writeVInt(0);
return;
@@ -302,14 +298,14 @@ void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
}
}
- Accountable readRamTree(StreamInput in) throws IOException {
+ private Accountable readRamTree(StreamInput in) throws IOException {
final String name = in.readString();
final long bytes = in.readVLong();
int numChildren = in.readVInt();
if (numChildren == 0) {
return Accountables.namedAccountable(name, bytes);
}
- List<Accountable> children = new ArrayList(numChildren);
+ List<Accountable> children = new ArrayList<>(numChildren);
while (numChildren-- > 0) {
children.add(readRamTree(in));
}
@@ -317,7 +313,7 @@ Accountable readRamTree(StreamInput in) throws IOException {
}
// the ram tree is written recursively since the depth is fairly low (5 or 6)
- void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
+ private void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
out.writeString(tree.toString());
out.writeVLong(tree.ramBytesUsed());
Collection<Accountable> children = tree.getChildResources();
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
index 2d22a6f3caf20..ae78de574531f 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
@@ -30,7 +30,6 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.Iterator;
public class SegmentsStats implements Streamable, Writeable, ToXContentFragment {
@@ -54,7 +53,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
* Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out,
* they'll just miss a proper description in the stats
*/
- private static ImmutableOpenMap<String, String> fileDescriptions = ImmutableOpenMap.<String, String>builder()
+ private static final ImmutableOpenMap<String, String> FILE_DESCRIPTIONS = ImmutableOpenMap.<String, String>builder()
.fPut("si", "Segment Info")
.fPut("fnm", "Fields")
.fPut("fdx", "Field Index")
@@ -150,8 +149,7 @@ public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) {
public void addFileSizes(ImmutableOpenMap<String, Long> fileSizes) {
ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(this.fileSizes);
- for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
- ObjectObjectCursor<String, Long> entry = it.next();
+ for (ObjectObjectCursor<String, Long> entry : fileSizes) {
if (map.containsKey(entry.key)) {
Long oldValue = map.get(entry.key);
map.put(entry.key, oldValue + entry.value);
@@ -206,7 +204,7 @@ public long getTermsMemoryInBytes() {
return this.termsMemoryInBytes;
}
- public ByteSizeValue getTermsMemory() {
+ private ByteSizeValue getTermsMemory() {
return new ByteSizeValue(termsMemoryInBytes);
}
@@ -217,7 +215,7 @@ public long getStoredFieldsMemoryInBytes() {
return this.storedFieldsMemoryInBytes;
}
- public ByteSizeValue getStoredFieldsMemory() {
+ private ByteSizeValue getStoredFieldsMemory() {
return new ByteSizeValue(storedFieldsMemoryInBytes);
}
@@ -228,7 +226,7 @@ public long getTermVectorsMemoryInBytes() {
return this.termVectorsMemoryInBytes;
}
- public ByteSizeValue getTermVectorsMemory() {
+ private ByteSizeValue getTermVectorsMemory() {
return new ByteSizeValue(termVectorsMemoryInBytes);
}
@@ -239,7 +237,7 @@ public long getNormsMemoryInBytes() {
return this.normsMemoryInBytes;
}
- public ByteSizeValue getNormsMemory() {
+ private ByteSizeValue getNormsMemory() {
return new ByteSizeValue(normsMemoryInBytes);
}
@@ -250,7 +248,7 @@ public long getPointsMemoryInBytes() {
return this.pointsMemoryInBytes;
}
- public ByteSizeValue getPointsMemory() {
+ private ByteSizeValue getPointsMemory() {
return new ByteSizeValue(pointsMemoryInBytes);
}
@@ -261,7 +259,7 @@ public long getDocValuesMemoryInBytes() {
return this.docValuesMemoryInBytes;
}
- public ByteSizeValue getDocValuesMemory() {
+ private ByteSizeValue getDocValuesMemory() {
return new ByteSizeValue(docValuesMemoryInBytes);
}
@@ -326,11 +324,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory());
builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp);
builder.startObject(Fields.FILE_SIZES);
- for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
- ObjectObjectCursor<String, Long> entry = it.next();
+ for (ObjectObjectCursor<String, Long> entry : fileSizes) {
builder.startObject(entry.key);
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value));
- builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others"));
+ builder.field(Fields.DESCRIPTION, FILE_DESCRIPTIONS.getOrDefault(entry.key, "Others"));
builder.endObject();
}
builder.endObject();
@@ -391,7 +388,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(fileSizes.size());
for (ObjectObjectCursor<String, Long> entry : fileSizes) {
out.writeString(entry.key);
- out.writeLong(entry.value.longValue());
+ out.writeLong(entry.value);
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
index f669139c07e2a..d858ccb0ab667 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
@@ -20,17 +20,12 @@
package org.elasticsearch.index.engine;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
public class SnapshotFailedEngineException extends EngineException {
- public SnapshotFailedEngineException(ShardId shardId, Throwable cause) {
- super(shardId, "Snapshot failed", cause);
- }
-
public SnapshotFailedEngineException(StreamInput in) throws IOException{
super(in);
}
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
index c1f92966196a3..d40e7d04e3ef3 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
@@ -35,7 +35,6 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -61,11 +60,9 @@ final class TranslogLeafReader extends LeafReader {
private static final FieldInfo FAKE_ID_FIELD
= new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
0, 0, 0, false);
- private final Version indexVersionCreated;
- TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) {
+ TranslogLeafReader(Translog.Index operation) {
this.operation = operation;
- this.indexVersionCreated = indexVersionCreated;
}
@Override
public CacheHelper getCoreCacheHelper() {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
index 0f6c217409c30..c869e2bc386aa 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
@@ -42,11 +42,7 @@ public VersionConflictEngineException(ShardId shardId, String id,
}
public VersionConflictEngineException(ShardId shardId, String id, String explanation) {
- this(shardId, null, id, explanation);
- }
-
- public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) {
- this(shardId, "[{}]: version conflict, {}", cause, id, explanation);
+ this(shardId, "[{}]: version conflict, {}", null, id, explanation);
}
public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
index e9b57e316cccc..50f1125b275f1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
@@ -169,7 +169,7 @@ private synchronized DirectoryReader getOrOpenReader() throws IOException {
listeners.beforeRefresh();
}
reader = DirectoryReader.open(engineConfig.getStore().directory());
- processReaders(reader, null);
+ processReader(reader);
reader = lastOpenedReader = wrapReader(reader, Function.identity());
reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed);
for (ReferenceManager.RefreshListener listeners : config().getInternalRefreshListener()) {
From 28aae648feb921727a6690c193f1162ed87c8e38 Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Wed, 22 May 2019 14:25:54 +0300
Subject: [PATCH 036/224] TestClusters: Convert docs (#42100)
* TestClusters: Convert docs
---
.../gradle/doc/DocsTestPlugin.groovy | 10 ++--
.../testclusters/ElasticsearchCluster.java | 12 +++--
.../testclusters/ElasticsearchNode.java | 11 +++-
.../TestClusterConfiguration.java | 3 ++
.../testclusters/TestClustersPluginIT.java | 28 +++++-----
docs/build.gradle | 18 ++++---
docs/reference/cluster/health.asciidoc | 2 +-
docs/reference/getting-started.asciidoc | 2 +-
x-pack/docs/build.gradle | 54 ++-----------------
9 files changed, 60 insertions(+), 80 deletions(-)
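The doc-test conversion relies on the new setNameCustomization hook added below: each node holds a Function<String, String> (identity by default) that is applied wherever the node name is read, which is how DocsTestPlugin maps "integTest" names to "node" names. A self-contained sketch of the mechanism, with a hypothetical Node class in place of the real ElasticsearchNode:

    import java.util.function.Function;

    // Hypothetical miniature of the renaming hook: the raw name stays fixed,
    // and a customization function is applied on every read.
    final class Node {
        private final String name;
        private Function<String, String> nameCustomization = Function.identity();

        Node(String name) {
            this.name = name;
        }

        void setNameCustomization(Function<String, String> customization) {
            this.nameCustomization = customization;
        }

        String getName() {
            return nameCustomization.apply(name);
        }

        public static void main(String[] args) {
            Node node = new Node("integTest-0");
            node.setNameCustomization(n -> n.replace("integTest", "node"));
            System.out.println(node.getName()); // prints "node-0"
        }
    }

Applying the function on read, rather than mutating the stored name, keeps the Gradle container key stable while still changing what ends up in node.name.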
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
index a0ce24e45c729..805a1b213e859 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle.doc
+import org.elasticsearch.gradle.OS
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.ClusterFormationTasks
@@ -32,10 +33,13 @@ public class DocsTestPlugin extends RestTestPlugin {
@Override
public void apply(Project project) {
+ project.pluginManager.apply('elasticsearch.testclusters')
project.pluginManager.apply('elasticsearch.standalone-rest-test')
super.apply(project)
+ String distribution = System.getProperty('tests.distribution', 'default')
// The distribution can be configured with -Dtests.distribution on the command line
- project.integTestCluster.distribution = System.getProperty('tests.distribution', 'default')
+ project.testClusters.integTest.distribution = distribution.toUpperCase()
+ project.testClusters.integTest.nameCustomization = { it.replace("integTest", "node") }
// Docs are published separately so no need to assemble
project.tasks.assemble.enabled = false
Map defaultSubstitutions = [
@@ -46,8 +50,8 @@ public class DocsTestPlugin extends RestTestPlugin {
'\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(),
'\\{version_qualified\\}': VersionProperties.elasticsearch,
'\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''),
- '\\{build_flavor\\}' : project.integTestCluster.distribution,
- '\\{build_type\\}' : ClusterFormationTasks.getOs().equals("windows") ? "zip" : "tar",
+ '\\{build_flavor\\}' : distribution,
+ '\\{build_type\\}' : OS.conditionalString().onWindows({"zip"}).onUnix({"tar"}).supply(),
]
Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
listSnippets.group 'Docs'
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
index 0cb7ee0c10fc7..e245fb0ead95a 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
@@ -42,6 +42,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@@ -70,7 +71,7 @@ public ElasticsearchCluster(String path, String clusterName, Project project, Fi
this.nodes = project.container(ElasticsearchNode.class);
this.nodes.add(
new ElasticsearchNode(
- path, clusterName + "-1",
+ path, clusterName + "-0",
services, artifactsExtractDir, workingDirBase
)
);
@@ -91,7 +92,7 @@ public void setNumberOfNodes(int numberOfNodes) {
);
}
- for (int i = nodes.size() + 1 ; i <= numberOfNodes; i++) {
+ for (int i = nodes.size() ; i < numberOfNodes; i++) {
this.nodes.add(new ElasticsearchNode(
path, clusterName + "-" + i, services, artifactsExtractDir, workingDirBase
));
@@ -99,7 +100,7 @@ public void setNumberOfNodes(int numberOfNodes) {
}
private ElasticsearchNode getFirstNode() {
- return nodes.getAt(clusterName + "-1");
+ return nodes.getAt(clusterName + "-0");
}
public int getNumberOfNodes() {
@@ -276,6 +277,11 @@ public void stop(boolean tailLogs) {
nodes.forEach(each -> each.stop(tailLogs));
}
+ @Override
+ public void setNameCustomization(Function<String, String> nameCustomization) {
+ nodes.all(each -> each.setNameCustomization(nameCustomization));
+ }
+
@Override
public boolean isProcessAlive() {
return nodes.stream().noneMatch(node -> node.isProcessAlive() == false);
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index 3bb1fb2ddb6e3..bba94f6c7d173 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -50,6 +50,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@@ -103,6 +104,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
private String version;
private File javaHome;
private volatile Process esProcess;
+ private Function<String, String> nameCustomization = Function.identity();
ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) {
this.path = path;
@@ -123,7 +125,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
}
public String getName() {
- return name;
+ return nameCustomization.apply(name);
}
public String getVersion() {
@@ -536,6 +538,11 @@ public synchronized void stop(boolean tailLogs) {
esProcess = null;
}
+ @Override
+ public void setNameCustomization(Function<String, String> nameCustomizer) {
+ this.nameCustomization = nameCustomizer;
+ }
+
private void stopHandle(ProcessHandle processHandle, boolean forcibly) {
// Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
if (processHandle.isAlive() == false) {
@@ -656,7 +663,7 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) {
}
private void createConfiguration() {
- defaultConfig.put("node.name", safeName(name));
+ defaultConfig.put("node.name", nameCustomization.apply(safeName(name)));
defaultConfig.put("path.repo", confPathRepo.toAbsolutePath().toString());
defaultConfig.put("path.data", confPathData.toAbsolutePath().toString());
defaultConfig.put("path.logs", confPathLogs.toAbsolutePath().toString());
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
index 628dadcbb9d37..1ccbeabd4b88a 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
@@ -85,6 +86,8 @@ public interface TestClusterConfiguration {
void stop(boolean tailLogs);
+ void setNameCustomization(Function<String, String> nameSupplier);
+
default void waitForConditions(
LinkedHashMap<String, Predicate<TestClusterConfiguration>> waitConditions,
long startedAtMillis,
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
index c9086d1459afd..39651ff896057 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
@@ -86,8 +86,8 @@ public void testUseClusterBySkippedAndWorkingTask() {
assertOutputContains(
result.getOutput(),
"> Task :user1",
- "Starting `node{::myTestCluster-1}`",
- "Stopping `node{::myTestCluster-1}`"
+ "Starting `node{::myTestCluster-0}`",
+ "Stopping `node{::myTestCluster-0}`"
);
}
@@ -104,22 +104,22 @@ public void testMultiProject() {
assertStartedAndStoppedOnce(result);
assertOutputOnlyOnce(
result.getOutput(),
- "Starting `node{:alpha:myTestCluster-1}`",
- "Stopping `node{::myTestCluster-1}`"
+ "Starting `node{:alpha:myTestCluster-0}`",
+ "Stopping `node{::myTestCluster-0}`"
);
assertOutputOnlyOnce(
result.getOutput(),
- "Starting `node{::myTestCluster-1}`",
- "Stopping `node{:bravo:myTestCluster-1}`"
+ "Starting `node{::myTestCluster-0}`",
+ "Stopping `node{:bravo:myTestCluster-0}`"
);
}
public void testReleased() {
BuildResult result = getTestClustersRunner("testReleased").build();
assertTaskSuccessful(result, ":testReleased");
- assertStartedAndStoppedOnce(result, "releasedVersionDefault-1");
- assertStartedAndStoppedOnce(result, "releasedVersionOSS-1");
- assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-1");
+ assertStartedAndStoppedOnce(result, "releasedVersionDefault-0");
+ assertStartedAndStoppedOnce(result, "releasedVersionOSS-0");
+ assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-0");
}
public void testIncremental() {
@@ -143,7 +143,7 @@ public void testUseClusterByFailingOne() {
assertStartedAndStoppedOnce(result);
assertOutputContains(
result.getOutput(),
- "Stopping `node{::myTestCluster-1}`, tailLogs: true",
+ "Stopping `node{::myTestCluster-0}`, tailLogs: true",
"Execution failed for task ':itAlwaysFails'."
);
}
@@ -155,7 +155,7 @@ public void testUseClusterByFailingDependency() {
assertStartedAndStoppedOnce(result);
assertOutputContains(
result.getOutput(),
- "Stopping `node{::myTestCluster-1}`, tailLogs: true",
+ "Stopping `node{::myTestCluster-0}`, tailLogs: true",
"Execution failed for task ':itAlwaysFails'."
);
}
@@ -165,7 +165,7 @@ public void testConfigurationLocked() {
assertTaskFailed(result, ":illegalConfigAlter");
assertOutputContains(
result.getOutput(),
- "Configuration for node{::myTestCluster-1} can not be altered, already locked"
+ "Configuration for node{::myTestCluster-0} can not be altered, already locked"
);
}
@@ -173,9 +173,9 @@ public void testConfigurationLocked() {
public void testMultiNode() {
BuildResult result = getTestClustersRunner(":multiNode").build();
assertTaskSuccessful(result, ":multiNode");
+ assertStartedAndStoppedOnce(result, "multiNode-0");
assertStartedAndStoppedOnce(result, "multiNode-1");
assertStartedAndStoppedOnce(result, "multiNode-2");
- assertStartedAndStoppedOnce(result, "multiNode-3");
}
public void testPluginInstalled() {
@@ -211,7 +211,7 @@ private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) {
}
private void assertStartedAndStoppedOnce(BuildResult result) {
- assertStartedAndStoppedOnce(result, "myTestCluster-1");
+ assertStartedAndStoppedOnce(result, "myTestCluster-0");
}
diff --git a/docs/build.gradle b/docs/build.gradle
index 8156d1d54b57a..feda444301ec7 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -1,3 +1,5 @@
+import static org.elasticsearch.gradle.Distribution.DEFAULT
+
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -35,15 +37,15 @@ buildRestTests.expectedUnconvertedCandidates = [
'reference/ml/apis/update-snapshot.asciidoc',
]
-integTestCluster {
- if ("default".equals(integTestCluster.distribution)) {
+testClusters.integTest {
+ if (singleNode().distribution == DEFAULT) {
setting 'xpack.license.self_generated.type', 'trial'
}
// enable regexes in painless so our tests don't complain about example snippets that use them
setting 'script.painless.regex.enabled', 'true'
Closure configFile = {
- extraConfigFile it, "src/test/cluster/config/$it"
+ extraConfigFile it, file("src/test/cluster/config/$it")
}
configFile 'analysis/example_word_list.txt'
configFile 'analysis/hyphenation_patterns.xml'
@@ -52,8 +54,8 @@ integTestCluster {
configFile 'userdict_ja.txt'
configFile 'userdict_ko.txt'
configFile 'KeywordTokenizer.rbbi'
- extraConfigFile 'hunspell/en_US/en_US.aff', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff'
- extraConfigFile 'hunspell/en_US/en_US.dic', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic'
+ extraConfigFile 'hunspell/en_US/en_US.aff', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff')
+ extraConfigFile 'hunspell/en_US/en_US.dic', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic')
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}
@@ -65,10 +67,12 @@ project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each {
if (subproj.path.startsWith(':plugins:repository-')) {
return
}
+ // FIXME
subproj.afterEvaluate { // need to wait until the project has been configured
- integTestCluster {
- plugin subproj.path
+ testClusters.integTest {
+ plugin file(subproj.bundlePlugin.archiveFile)
}
+ tasks.integTest.dependsOn subproj.bundlePlugin
}
}
diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc
index 1e33455d02613..d75ce77d1af80 100644
--- a/docs/reference/cluster/health.asciidoc
+++ b/docs/reference/cluster/health.asciidoc
@@ -34,7 +34,7 @@ Returns this:
"active_shards_percent_as_number": 50.0
}
--------------------------------------------------
-// TESTRESPONSE[s/testcluster/docs_integTestCluster/]
+// TESTRESPONSE[s/testcluster/integTest/]
// TESTRESPONSE[s/"number_of_pending_tasks" : 0,/"number_of_pending_tasks" : $body.number_of_pending_tasks,/]
// TESTRESPONSE[s/"task_max_waiting_in_queue_millis": 0/"task_max_waiting_in_queue_millis": $body.task_max_waiting_in_queue_millis/]
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc
index b81d2b284371d..7df9bdfe7aa6c 100755
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/getting-started.asciidoc
@@ -301,7 +301,7 @@ And the response:
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0%
--------------------------------------------------
-// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/]
+// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ integTest/]
// TESTRESPONSE[s/0 0 -/0 \\d+ -/]
// TESTRESPONSE[_cat]
diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle
index 0075b4989e69f..0a23bb9c9cf62 100644
--- a/x-pack/docs/build.gradle
+++ b/x-pack/docs/build.gradle
@@ -27,54 +27,14 @@ dependencies {
testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts')
}
-Closure waitWithAuth = { NodeInfo node, AntBuilder ant ->
- File tmpFile = new File(node.cwd, 'wait.success')
- // wait up to twenty seconds
- final long stopTime = System.currentTimeMillis() + 20000L;
- Exception lastException = null;
- while (System.currentTimeMillis() < stopTime) {
- lastException = null;
- // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned
- HttpURLConnection httpURLConnection = null;
- try {
- httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health").openConnection();
- httpURLConnection.setRequestProperty("Authorization", "Basic " +
- Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
- httpURLConnection.setRequestMethod("GET");
- httpURLConnection.setConnectTimeout(1000);
- httpURLConnection.setReadTimeout(30000);
- httpURLConnection.connect();
- if (httpURLConnection.getResponseCode() == 200) {
- tmpFile.withWriter StandardCharsets.UTF_8.name(), {
- it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name()))
- }
- break;
- }
- } catch (Exception e) {
- logger.debug("failed to call cluster health", e)
- lastException = e
- } finally {
- if (httpURLConnection != null) {
- httpURLConnection.disconnect();
- }
- }
-
- // did not start, so wait a bit before trying again
- Thread.sleep(500L);
- }
- if (tmpFile.exists() == false && lastException != null) {
- logger.error("final attempt of calling cluster health failed", lastException)
- }
- return tmpFile.exists()
-}
-
// copy xpack rest api
File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources')
project.copyRestSpec.from(xpackResources) {
include 'rest-api-spec/api/**'
}
-File jwks = new File(xpackProject('test:idp-fixture').projectDir, 'oidc/op-jwks.json')
-integTestCluster {
+
+testClusters.integTest {
+ extraConfigFile 'op-jwks.json', xpackProject('test:idp-fixture').file("oidc/op-jwks.json")
setting 'xpack.security.enabled', 'true'
setting 'xpack.security.authc.api_key.enabled', 'true'
setting 'xpack.security.authc.token.enabled', 'true'
@@ -91,17 +51,13 @@ integTestCluster {
setting 'xpack.security.authc.realms.oidc.oidc1.op.jwkset_path', 'op-jwks.json'
setting 'xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri', 'https://my.fantastic.rp/cb'
setting 'xpack.security.authc.realms.oidc.oidc1.rp.client_id', 'elasticsearch-rp'
- keystoreSetting 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2'
+ keystore 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2'
setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 'id_token'
setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub'
- setupCommand 'setupTestAdmin',
- 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser'
- waitCondition = waitWithAuth
- extraConfigFile 'op-jwks.json', jwks
+ user username: 'test_admin'
}
-
buildRestTests.docs = fileTree(projectDir) {
// No snippets in here!
exclude 'build.gradle'
From 385dfd95d6d149b9d9ca117768fe48a1dec1f7b6 Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Wed, 22 May 2019 08:10:10 -0400
Subject: [PATCH 037/224] Update version skips and constants after backport
(#42290)
After https://github.com/elastic/elasticsearch/pull/41906 was
backported, we need to update the various test skips and version
constants
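
The pattern behind these updates, as a minimal self-contained Java sketch (class and
constant names here are illustrative, not the real Elasticsearch types): before the
backport, the wire-format gate had to assume 8.0.0 was the first version with the new
serialization; once the change exists on the 7.x branch, 7.2.0 becomes the correct
cut-off on both the read and write paths.

    class VersionGateSketch {
        // versions encoded as major * 1_000_000 + minor * 10_000 + patch, purely for illustration
        static final int V_7_2_0 = 7_020_000;
        static final int V_8_0_0 = 8_000_000;

        // Before the backport: anything below 8.0.0 had to be treated as legacy.
        static boolean legacyBeforeBackport(int remoteVersion) {
            return remoteVersion < V_8_0_0;
        }

        // After the backport: 7.2.0+ nodes understand the new format, so the gate
        // moves down and 7.2.0 peers no longer receive the legacy encoding.
        static boolean legacyAfterBackport(int remoteVersion) {
            return remoteVersion < V_7_2_0;
        }

        public static void main(String[] args) {
            System.out.println(legacyBeforeBackport(V_7_2_0)); // true  (wrong once backported)
            System.out.println(legacyAfterBackport(V_7_2_0));  // false (correct)
        }
    }

The YAML skip ranges change for the same reason: `" - 7.1.99"` skips everything up to
and including 7.1.x, so the tests run against 7.2.0+, where the feature now exists.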
---
.../test/search.aggregation/230_composite.yml | 4 ++--
.../test/search.aggregation/250_moving_fn.yml | 4 ++--
.../test/search.aggregation/80_typed_keys.yml | 2 +-
.../rest-api-spec/test/search/240_date_nanos.yml | 2 +-
.../bucket/histogram/DateIntervalWrapper.java | 4 ++--
.../xpack/restart/FullClusterRestartIT.java | 2 +-
.../upgrades/RollupDateHistoUpgradeIT.java | 2 +-
.../test/mixed_cluster/40_ml_datafeed_crud.yml | 6 +-----
.../test/old_cluster/40_ml_datafeed_crud.yml | 13 ++++---------
.../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ----
10 files changed, 15 insertions(+), 28 deletions(-)
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
index 4003d29abb5bf..fc0710fdb5375 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
@@ -241,7 +241,7 @@ setup:
---
"Composite aggregation with format":
- skip:
- version: " - 7.99.99" #TODO change this after backport
+ version: " - 7.1.99"
reason: calendar_interval introduced in 7.2.0
features: warnings
@@ -307,7 +307,7 @@ setup:
---
"Composite aggregation with format and calendar_interval":
- skip:
- version: " - 7.99.99" #TODO change this after backport
+ version: " - 7.1.99"
reason: calendar_interval introduced in 7.2.0
- do:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml
index a4517d46d2c62..cd24da7bd616b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml
@@ -2,7 +2,7 @@
"Bad window":
- skip:
- version: " - 7.99.0" #TODO change this after backport
+ version: " - 7.1.99"
reason: "calendar_interval added in 7.2"
- do:
@@ -30,7 +30,7 @@
"Bad window deprecated interval":
- skip:
- version: " - 7.99.0" #TODO change this after backport
+ version: " - 7.1.99"
reason: "interval deprecation added in 7.2"
features: "warnings"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml
index 023c08f3b2d50..d041432556430 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml
@@ -206,7 +206,7 @@ setup:
---
"Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation":
- skip:
- version: " - 7.99.0" #TODO change this after backport
+ version: " - 7.1.99"
reason: "calendar_interval added in 7.2"
- do:
search:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
index 352d5edf6b374..2caf9c7084792 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml
@@ -123,7 +123,7 @@ setup:
---
"date histogram aggregation with date and date_nanos mapping":
- skip:
- version: " - 7.99.99" #TODO change this after backport
+ version: " - 7.1.99"
reason: calendar_interval introduced in 7.2.0
- do:
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
index b08782f1fd37a..229fa0d15bb30 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
@@ -113,7 +113,7 @@ public static void declareIntervalFields(Object
public DateIntervalWrapper() {}
public DateIntervalWrapper(StreamInput in) throws IOException {
- if (in.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport
+ if (in.getVersion().before(Version.V_7_2_0)) {
long interval = in.readLong();
DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new);
@@ -374,7 +374,7 @@ public boolean isEmpty() {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport
+ if (out.getVersion().before(Version.V_7_2_0)) {
if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) {
out.writeLong(TimeValue.parseTimeValue(dateHistogramInterval.toString(),
DateHistogramAggregationBuilder.NAME + ".innerWriteTo").getMillis());
diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index f17aab309ba72..a62a23dac70b8 100644
--- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -229,7 +229,7 @@ public void testRollupAfterRestart() throws Exception {
final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test");
String intervalType;
- if (getOldClusterVersion().onOrAfter(Version.V_8_0_0)) { // TODO change this after backport
+ if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) {
intervalType = "fixed_interval";
} else {
intervalType = "interval";
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java
index 035e29ccf771c..08ad9f09d599c 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java
@@ -34,7 +34,7 @@ public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase {
Version.fromString(System.getProperty("tests.upgrade_from_version"));
public void testDateHistoIntervalUpgrade() throws Exception {
- assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_7_2_0));
+ assumeTrue("DateHisto interval changed in 7.2", UPGRADE_FROM_VERSION.before(Version.V_7_2_0));
switch (CLUSTER_TYPE) {
case OLD:
break;
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
index 4d732015d47f4..2ff9b08e9b13f 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
@@ -1,8 +1,3 @@
-setup:
- - skip:
- version: "all"
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
-
---
"Test old cluster datafeed without aggs":
- do:
@@ -114,6 +109,7 @@ setup:
- do:
warnings:
- '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.'
+ - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.'
ml.put_datafeed:
datafeed_id: mixed-cluster-datafeed-with-aggs
body: >
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
index 2a7b56adb9a16..4918dde9ba899 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
@@ -1,8 +1,3 @@
-setup:
- - skip:
- version: "all"
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
-
---
"Put job and datafeed without aggs in old cluster":
@@ -53,8 +48,8 @@ setup:
---
"Put job and datafeed with aggs in old cluster - pre-deprecated interval":
- skip:
- version: "all" #TODO change this after backport
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258; calendar_interval introduced in 7.2.0"
+ version: "7.1.99 - "
+ reason: "calendar_interval introduced in 7.2.0"
- do:
ml.put_job:
@@ -123,8 +118,8 @@ setup:
---
"Put job and datafeed with aggs in old cluster - deprecated interval with warning":
- skip:
- version: " - 7.99.99" #TODO change this after backport
- reason: calendar_interval introduced in 7.1.0
+ version: " - 7.1.99"
+ reason: calendar_interval introduced in 7.2.0
features: warnings
- do:
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
index 4b742e10de61f..5dc71ecb0679e 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
@@ -1,8 +1,4 @@
setup:
- - skip:
- version: "all"
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
-
- do:
cluster.health:
wait_for_status: green
From 1e9221da84ad0da66de87eb82c95c1255a81a530 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 22 May 2019 14:12:25 +0200
Subject: [PATCH 038/224] Remove Obsolete BwC Logic from BlobStoreRepository
(#42193)
* Remove Obsolete BwC Logic from BlobStoreRepository
* We can't restore 1.3.3 files anyway -> no point in doing the dance of computing a hash here
* Some other minor+obvious cleanups
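
For context, the file-reuse check that remains once the hash-recalculation dance is
gone, in a condensed, self-contained form (the types below are illustrative stand-ins
for BlobStoreIndexShardSnapshot.FileInfo and StoreFileMetaData, not the real classes):

    import java.util.Set;

    class FileMeta {
        final String name; final long length; final String checksum;
        FileMeta(String name, long length, String checksum) {
            this.name = name; this.length = length; this.checksum = checksum;
        }
        // mirrors the spirit of FileInfo#isSame: same name, size and checksum
        boolean isSame(FileMeta other) {
            return name.equals(other.name) && length == other.length && checksum.equals(other.checksum);
        }
    }

    class ReuseCheckSketch {
        // a file already in the repository is reused only if its metadata matches
        // and the blob is actually present; otherwise it is snapshotted again
        static boolean canReuse(FileMeta snapshotFile, FileMeta localFile, Set<String> blobs) {
            return snapshotFile.isSame(localFile) && blobs.contains(snapshotFile.name);
        }

        public static void main(String[] args) {
            FileMeta repo = new FileMeta("_0.cfs", 1024, "abc");
            FileMeta local = new FileMeta("_0.cfs", 1024, "abc");
            System.out.println(canReuse(repo, local, Set.of("_0.cfs"))); // true
        }
    }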
---
.../blobstore/BlobStoreRepository.java | 43 +------------------
.../blobstore/FileRestoreContext.java | 40 +----------------
2 files changed, 3 insertions(+), 80 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 86409ebac7d31..49b551b26b796 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -26,8 +26,6 @@
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RateLimiter;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
@@ -954,8 +952,6 @@ protected void finalize(final List<SnapshotFiles> snapshots,
final Map<String, BlobMetaData> blobs,
final String reason) {
final String indexGeneration = Integer.toString(fileListGeneration);
- final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration);
-
final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
try {
// Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
@@ -998,7 +994,8 @@ protected void finalize(final List<SnapshotFiles> snapshots,
snapshotId, shardId), e);
}
} catch (IOException e) {
- String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]";
+ String message =
+ "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]";
throw new IndexShardSnapshotFailedException(shardId, message, e);
}
}
@@ -1135,16 +1132,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
if (filesInfo != null) {
for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
- try {
- // in 1.3.3 we added additional hashes for .si / segments_N files
- // to ensure we don't double the space in the repo since old snapshots
- // don't have this hash we try to read that hash from the blob store
- // in a bwc compatible way.
- maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
- } catch (Exception e) {
- logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]",
- shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
- }
if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
// a commit point file with the same name, size and checksum was already copied to repository
// we will reuse it for this snapshot
@@ -1315,32 +1302,6 @@ private void checkAborted() {
}
}
- /**
- * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
- * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
- * comparison of the files on a per-segment / per-commit level.
- */
- private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
- Store.MetadataSnapshot snapshot) throws Exception {
- final StoreFileMetaData metadata;
- if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
- if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
- // we have a hash - check if our repo has a hash too otherwise we have
- // to calculate it.
- // we might have multiple parts even though the file is small... make sure we read all of it.
- try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
- BytesRefBuilder builder = new BytesRefBuilder();
- Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
- BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
- assert hash.length == 0;
- hash.bytes = builder.bytes();
- hash.offset = 0;
- hash.length = builder.length();
- }
- }
- }
- }
-
private static final class PartSliceStream extends SlicedInputStream {
private final BlobContainer container;
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
index f78ddab9ee44c..3abe4d7b50722 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
@@ -27,8 +27,6 @@
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.shard.ShardId;
@@ -127,17 +125,6 @@ public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException
final Map<String, StoreFileMetaData> snapshotMetaData = new HashMap<>();
final Map<String, BlobStoreIndexShardSnapshot.FileInfo> fileInfos = new HashMap<>();
for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) {
- try {
- // in 1.3.3 we added additional hashes for .si / segments_N files
- // to ensure we don't double the space in the repo since old snapshots
- // don't have this hash we try to read that hash from the blob store
- // in a bwc compatible way.
- maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata);
- } catch (Exception e) {
- // if the index is broken we might not be able to read it
- logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId,
- fileInfo.physicalName(), fileInfo.metadata()), e);
- }
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
fileInfos.put(fileInfo.metadata().name(), fileInfo);
}
@@ -237,7 +224,7 @@ protected void restoreFiles(List<BlobStoreIndexShardSnapshot.FileInfo> filesToRe
protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo);
@SuppressWarnings("unchecked")
- private Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
+ private static Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
return Iterables.concat(diff.different, diff.missing);
}
@@ -276,29 +263,4 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi
}
}
- /**
- * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
- * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
- * comparison of the files on a per-segment / per-commit level.
- */
- private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot)
- throws IOException {
- final StoreFileMetaData metadata;
- if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
- if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
- // we have a hash - check if our repo has a hash too otherwise we have
- // to calculate it.
- // we might have multiple parts even though the file is small... make sure we read all of it.
- try (InputStream stream = fileInputStream(fileInfo)) {
- BytesRefBuilder builder = new BytesRefBuilder();
- Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
- BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
- assert hash.length == 0;
- hash.bytes = builder.bytes();
- hash.offset = 0;
- hash.length = builder.length();
- }
- }
- }
- }
}
From 05809deb490f71a90a4164c302955c5c2ab6d8ac Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 14:21:48 +0200
Subject: [PATCH 039/224] Revert "Mute
SpecificMasterNodesIT.testElectOnlyBetweenMasterNodes()"
This reverts commit 2964ceaa0371d8bd1665e599c6395a7e7026d094.
---
.../java/org/elasticsearch/cluster/SpecificMasterNodesIT.java | 1 -
1 file changed, 1 deletion(-)
diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
index f80a5befa83d9..38b9579eff046 100644
--- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
@@ -86,7 +86,6 @@ public void testSimpleOnlyMasterNodeElection() throws IOException {
.execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName));
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38331")
public void testElectOnlyBetweenMasterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
logger.info("--> start data node / non master node");
From d292d95eaada378c216ce00d11e68db79954d359 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 14:36:17 +0200
Subject: [PATCH 040/224] Fix testCannotJoinIfMasterLostDataFolder
Relates to #41047
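
The second half of the fix polls the health checks instead of asserting them once:
assertBusy retries the assertion block until it passes or the timeout elapses. A
minimal sketch of that retry loop (not the actual ESTestCase implementation, which
also uses a different backoff strategy):

    import java.util.concurrent.TimeUnit;

    class AssertBusySketch {
        interface CheckedRunnable { void run() throws Exception; }

        static void assertBusy(CheckedRunnable assertion, long timeout, TimeUnit unit) throws Exception {
            long deadline = System.nanoTime() + unit.toNanos(timeout);
            while (true) {
                try {
                    assertion.run();
                    return; // assertions held, we are done
                } catch (AssertionError e) {
                    if (System.nanoTime() > deadline) {
                        throw e; // give up and surface the last failure
                    }
                    Thread.sleep(100); // back off briefly before retrying
                }
            }
        }

        public static void main(String[] args) throws Exception {
            long start = System.nanoTime();
            assertBusy(() -> {
                if (System.nanoTime() - start < TimeUnit.MILLISECONDS.toNanos(300)) {
                    throw new AssertionError("cluster not converged yet");
                }
            }, 30, TimeUnit.SECONDS);
            System.out.println("assertions eventually held");
        }
    }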
---
.../discovery/ClusterDisruptionIT.java | 23 +++++++++++++++----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index 3a257ec5973f8..ad3b8006ed0c3 100644
--- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -31,6 +31,7 @@
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
+import org.elasticsearch.cluster.coordination.LagDetector;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -389,7 +390,6 @@ public void onFailure(Exception e) {
}
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41047")
public void testCannotJoinIfMasterLostDataFolder() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
@@ -402,7 +402,18 @@ public boolean clearData(String nodeName) {
@Override
public Settings onNodeStopped(String nodeName) {
- return Settings.builder().put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName).build();
+ return Settings.builder()
+ .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName)
+ /*
+ * the data node might join while the master is still not fully established as master just yet and bypasses the join
+ * validation that is done before adding the node to the cluster. Only the join validation when handling the publish
+ * request takes place, but at this point the cluster state has been successfully committed, and will subsequently be
+ * exposed to the applier. The health check below therefore sees the cluster state with the 2 nodes and thinks all is
+ * good, even though the data node never accepted this state. What's worse is that it takes 90 seconds for the data
+ * node to be kicked out of the cluster (lag detection). We speed this up here.
+ */
+ .put(LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.getKey(), "10s")
+ .build();
}
@Override
@@ -411,9 +422,11 @@ public boolean validateClusterForming() {
}
});
- assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
- assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get()
- .isTimedOut());
+ assertBusy(() -> {
+ assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
+ assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get()
+ .isTimedOut());
+ }, 30, TimeUnit.SECONDS);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); // otherwise we will fail during clean-up
}
From 40beecd1e04b81dfbc398e22132bf9411f54c6d5 Mon Sep 17 00:00:00 2001
From: markharwood
Date: Wed, 22 May 2019 13:37:47 +0100
Subject: [PATCH 041/224] Search - enable low_level_cancellation by default.
(#42291)
Benchmarking on worst-case queries (a max agg on match_all, or a popular-term query on a large index) showed no noticeable slowdown.
Closes #26258
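
Because the setting keeps Property.Dynamic, the old behaviour stays one settings
update away. A rough model of what a dynamic boolean node setting means for the
search path (plain Java for illustration, not the actual ES Setting infrastructure):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.BooleanSupplier;

    class LowLevelCancellationSketch {
        // defaults to true after this change, but stays updatable at runtime
        private static final AtomicBoolean LOW_LEVEL_CANCELLATION = new AtomicBoolean(true);

        // what a cluster-settings update would do under the hood
        static void updateSetting(boolean enabled) {
            LOW_LEVEL_CANCELLATION.set(enabled);
        }

        // with low-level checks on, the cancellation flag is polled per unit of
        // work inside the collector, instead of only between search phases
        static long scan(long docs, BooleanSupplier isCancelled) {
            long scanned = 0;
            for (long i = 0; i < docs; i++) {
                if (LOW_LEVEL_CANCELLATION.get() && isCancelled.getAsBoolean()) {
                    break; // react to cancellation quickly
                }
                scanned++;
            }
            return scanned;
        }

        public static void main(String[] args) {
            System.out.println(scan(1_000_000, () -> false)); // runs to completion
            System.out.println(scan(1_000_000, () -> true));  // stops immediately
        }
    }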
---
.../main/java/org/elasticsearch/search/SearchService.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java
index b703493b4d505..daf8e1faf7bb8 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchService.java
@@ -134,11 +134,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
/**
* Enables low-level, frequent search cancellation checks. Enabling low-level checks will make long-running searches react
- * to the cancellation request faster. However, since it will produce more cancellation checks it might slow the search performance
- * down.
+ * to the cancellation request faster. It will produce more cancellation checks but benchmarking has shown these did not
+ * noticeably slow down searches.
*/
public static final Setting<Boolean> LOW_LEVEL_CANCELLATION_SETTING =
- Setting.boolSetting("search.low_level_cancellation", false, Property.Dynamic, Property.NodeScope);
+ Setting.boolSetting("search.low_level_cancellation", true, Property.Dynamic, Property.NodeScope);
public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING =
From b03d7b20928b481ee09418bcc39f1536b40493b0 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 14:45:26 +0200
Subject: [PATCH 042/224] Remove testNodeFailuresAreProcessedOnce
This test was not checking the thing it was supposed to anyway.
---
.../cluster/coordination/ZenDiscoveryIT.java | 36 -------------------
1 file changed, 36 deletions(-)
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java
index 9a17c25f44cce..feffbfc792656 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java
@@ -41,18 +41,14 @@
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.transport.RemoteTransportException;
-import java.io.IOException;
import java.util.EnumSet;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
@@ -97,38 +93,6 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep
assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster));
}
- public void testNodeFailuresAreProcessedOnce() throws IOException {
- Settings masterNodeSettings = Settings.builder()
- .put(Node.NODE_DATA_SETTING.getKey(), false)
- .build();
- String master = internalCluster().startNode(masterNodeSettings);
- Settings dateNodeSettings = Settings.builder()
- .put(Node.NODE_MASTER_SETTING.getKey(), false)
- .build();
- internalCluster().startNodes(2, dateNodeSettings);
- client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
-
- ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
- final AtomicInteger numUpdates = new AtomicInteger();
- final CountDownLatch nodesStopped = new CountDownLatch(1);
- clusterService.addStateApplier(event -> {
- numUpdates.incrementAndGet();
- try {
- // block until both nodes have stopped to accumulate node failures
- nodesStopped.await();
- } catch (InterruptedException e) {
- //meh
- }
- });
-
- internalCluster().stopRandomNonMasterNode();
- internalCluster().stopRandomNonMasterNode();
- nodesStopped.countDown();
-
- client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed
- assertThat(numUpdates.get(), either(equalTo(1)).or(equalTo(2))); // due to batching, both nodes can be handled in same CS update
- }
-
public void testHandleNodeJoin_incompatibleClusterState()
throws InterruptedException, ExecutionException, TimeoutException {
String masterNode = internalCluster().startMasterOnlyNode();
From 94848d8a8c27d2f0af4e3da7aa155fce1896d562 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 22 May 2019 15:31:29 +0200
Subject: [PATCH 043/224] Dump Stacktrace on Slow IO-Thread Operations (#42000)
* Dump Stacktrace on Slow IO-Thread Operations
* Follow up to #39729 extending the functionality to actually dump the
stack when the thread is blocked not afterwards
* Logging the stacktrace after the thread became unblocked is only of
limited use because we don't know what happened in the slow callback
from that (only whether we were blocked on a read,write,connect etc.)
* Relates #41745
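
The core of the change, condensed into a self-contained sketch (illustrative names,
plain Java executor in place of the ES ThreadPool): handlers register before doing
work, and a periodic task dumps the stack of any thread still registered past the
warn threshold.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class WatchdogSketch {
        private static final long WARN_THRESHOLD_NANOS = TimeUnit.MILLISECONDS.toNanos(150);
        private final Map<Thread, Long> registry = new ConcurrentHashMap<>();
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        WatchdogSketch() {
            // the periodic scan replaces the old "log after the fact" approach
            scheduler.scheduleAtFixedRate(this::logLongRunning, 2, 2, TimeUnit.SECONDS);
        }

        // returns false on re-entrant registration, mirroring the patch below
        boolean register() {
            return registry.put(Thread.currentThread(), System.nanoTime()) == null;
        }

        void unregister() {
            registry.remove(Thread.currentThread());
        }

        private void logLongRunning() {
            for (Map.Entry<Thread, Long> e : registry.entrySet()) {
                if (System.nanoTime() - e.getValue() > WARN_THRESHOLD_NANOS) {
                    // the key difference: dump the stack while the thread is still blocked
                    System.err.println("Slow execution on network thread [" + e.getKey().getName() + "]:");
                    for (StackTraceElement frame : e.getKey().getStackTrace()) {
                        System.err.println("    at " + frame);
                    }
                }
            }
        }
    }

Each handler callback then wraps its work in
`final boolean registered = watchdog.register(); try { ... } finally { if (registered) watchdog.unregister(); }`,
as in the TestEventHandler changes below.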
---
.../transport/nio/MockNioTransport.java | 71 ++++++++++-
.../transport/nio/TestEventHandler.java | 114 ++++++++++--------
.../transport/nio/TestEventHandlerTests.java | 13 +-
3 files changed, 143 insertions(+), 55 deletions(-)
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
index dc0e14a4d2984..42dae39146605 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
@@ -31,6 +31,7 @@
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.nio.BytesChannelContext;
@@ -57,11 +58,16 @@
import java.nio.ByteBuffer;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
+import java.util.Arrays;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.IntFunction;
+import java.util.stream.Collectors;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@@ -70,6 +76,7 @@ public class MockNioTransport extends TcpTransport {
private static final Logger logger = LogManager.getLogger(MockNioTransport.class);
private final ConcurrentMap<String, MockTcpChannelFactory> profileToChannelFactory = newConcurrentMap();
+ private final TransportThreadWatchdog transportThreadWatchdog;
private volatile NioSelectorGroup nioGroup;
private volatile MockTcpChannelFactory clientChannelFactory;
@@ -77,6 +84,7 @@ public MockNioTransport(Settings settings, Version version, ThreadPool threadPoo
PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry,
CircuitBreakerService circuitBreakerService) {
super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
+ this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool);
}
@Override
@@ -96,7 +104,7 @@ protected void doStart() {
boolean success = false;
try {
nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2,
- (s) -> new TestEventHandler(this::onNonChannelException, s, System::nanoTime));
+ (s) -> new TestEventHandler(this::onNonChannelException, s, transportThreadWatchdog));
ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
clientChannelFactory = new MockTcpChannelFactory(true, clientProfileSettings, "client");
@@ -125,6 +133,7 @@ protected void doStart() {
@Override
protected void stopInternal() {
try {
+ transportThreadWatchdog.stop();
nioGroup.close();
} catch (Exception e) {
logger.warn("unexpected exception while stopping nio group", e);
@@ -311,4 +320,64 @@ public void sendMessage(BytesReference reference, ActionListener<Void> listener)
getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener));
}
}
+
+ static final class TransportThreadWatchdog {
+
+ private static final long WARN_THRESHOLD = TimeUnit.MILLISECONDS.toNanos(150);
+
+ // Only check every 2s so we don't flood the logs on a blocked thread.
+ // We mostly care about long blocks, not random slowness; in tests this will still eventually catch
+ // slow operations that block for less than 2s.
+ private static final TimeValue CHECK_INTERVAL = TimeValue.timeValueSeconds(2);
+
+ private final ThreadPool threadPool;
+ private final ConcurrentHashMap<Thread, Long> registry = new ConcurrentHashMap<>();
+
+ private volatile boolean stopped;
+
+ TransportThreadWatchdog(ThreadPool threadPool) {
+ this.threadPool = threadPool;
+ threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC);
+ }
+
+ public boolean register() {
+ Long previousValue = registry.put(Thread.currentThread(), threadPool.relativeTimeInNanos());
+ return previousValue == null;
+ }
+
+ public void unregister() {
+ Long previousValue = registry.remove(Thread.currentThread());
+ assert previousValue != null;
+ maybeLogElapsedTime(previousValue);
+ }
+
+ private void maybeLogElapsedTime(long startTime) {
+ long elapsedTime = threadPool.relativeTimeInNanos() - startTime;
+ if (elapsedTime > WARN_THRESHOLD) {
+ logger.warn(
+ new ParameterizedMessage("Slow execution on network thread [{} milliseconds]",
+ TimeUnit.NANOSECONDS.toMillis(elapsedTime)),
+ new RuntimeException("Slow exception on network thread"));
+ }
+ }
+
+ private void logLongRunningExecutions() {
+ for (Map.Entry<Thread, Long> entry : registry.entrySet()) {
+ final long elapsedTime = threadPool.relativeTimeInNanos() - entry.getValue();
+ if (elapsedTime > WARN_THRESHOLD) {
+ final Thread thread = entry.getKey();
+ logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(),
+ TimeUnit.NANOSECONDS.toMillis(elapsedTime),
+ Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n")));
+ }
+ }
+ if (stopped == false) {
+ threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC);
+ }
+ }
+
+ public void stop() {
+ stopped = true;
+ }
+ }
}
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java
index a70ecb0c59efa..069e19c34558c 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java
@@ -19,9 +19,6 @@
package org.elasticsearch.transport.nio;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.nio.ChannelContext;
import org.elasticsearch.nio.EventHandler;
import org.elasticsearch.nio.NioSelector;
@@ -32,185 +29,202 @@
import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
-import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
-import java.util.function.LongSupplier;
import java.util.function.Supplier;
public class TestEventHandler extends EventHandler {
- private static final Logger logger = LogManager.getLogger(TestEventHandler.class);
-
private final Set<SocketChannelContext> hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>());
private final Set<SocketChannelContext> hasConnectExceptionMap = Collections.newSetFromMap(new WeakHashMap<>());
- private final LongSupplier relativeNanosSupplier;
+ private final MockNioTransport.TransportThreadWatchdog transportThreadWatchdog;
- TestEventHandler(Consumer<Exception> exceptionHandler, Supplier<NioSelector> selectorSupplier, LongSupplier relativeNanosSupplier) {
+ TestEventHandler(Consumer<Exception> exceptionHandler, Supplier<NioSelector> selectorSupplier,
+ MockNioTransport.TransportThreadWatchdog transportThreadWatchdog) {
super(exceptionHandler, selectorSupplier);
- this.relativeNanosSupplier = relativeNanosSupplier;
+ this.transportThreadWatchdog = transportThreadWatchdog;
}
@Override
protected void acceptChannel(ServerChannelContext context) throws IOException {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.acceptChannel(context);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void acceptException(ServerChannelContext context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.acceptException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void handleRegistration(ChannelContext<?> context) throws IOException {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleRegistration(context);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void registrationException(ChannelContext<?> context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.registrationException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
public void handleConnect(SocketChannelContext context) throws IOException {
assert hasConnectedMap.contains(context) == false : "handleConnect should only be called if a channel is not yet connected";
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleConnect(context);
if (context.isConnectComplete()) {
hasConnectedMap.add(context);
}
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
public void connectException(SocketChannelContext context, Exception e) {
assert hasConnectExceptionMap.contains(context) == false : "connectException should only be called at most once per channel";
+ final boolean registered = transportThreadWatchdog.register();
hasConnectExceptionMap.add(context);
- long startTime = relativeNanosSupplier.getAsLong();
try {
super.connectException(context, e);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void handleRead(SocketChannelContext context) throws IOException {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleRead(context);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void readException(SocketChannelContext context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.readException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void handleWrite(SocketChannelContext context) throws IOException {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleWrite(context);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void writeException(SocketChannelContext context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.writeException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void handleTask(Runnable task) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleTask(task);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void taskException(Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.taskException(exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void handleClose(ChannelContext<?> context) throws IOException {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.handleClose(context);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void closeException(ChannelContext<?> context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.closeException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
@Override
protected void genericChannelException(ChannelContext<?> context, Exception exception) {
- long startTime = relativeNanosSupplier.getAsLong();
+ final boolean registered = transportThreadWatchdog.register();
try {
super.genericChannelException(context, exception);
} finally {
- maybeLogElapsedTime(startTime);
- }
- }
-
- private static final long WARN_THRESHOLD = 150;
-
- private void maybeLogElapsedTime(long startTime) {
- long elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeNanosSupplier.getAsLong() - startTime);
- if (elapsedTime > WARN_THRESHOLD) {
- logger.warn(new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", elapsedTime),
- new RuntimeException("Slow exception on network thread"));
+ if (registered) {
+ transportThreadWatchdog.unregister();
+ }
}
}
}
diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java
index 2a570eb59b6f6..424d4922f024e 100644
--- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java
@@ -27,6 +27,7 @@
import org.elasticsearch.nio.SocketChannelContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.threadpool.ThreadPool;
import java.util.HashMap;
import java.util.Map;
@@ -34,6 +35,7 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.LongSupplier;
+import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
public class TestEventHandlerTests extends ESTestCase {
@@ -43,12 +45,12 @@ public class TestEventHandlerTests extends ESTestCase {
public void setUp() throws Exception {
super.setUp();
appender = new MockLogAppender();
- Loggers.addAppender(LogManager.getLogger(TestEventHandler.class), appender);
+ Loggers.addAppender(LogManager.getLogger(MockNioTransport.class), appender);
appender.start();
}
public void tearDown() throws Exception {
- Loggers.removeAppender(LogManager.getLogger(TestEventHandler.class), appender);
+ Loggers.removeAppender(LogManager.getLogger(MockNioTransport.class), appender);
appender.stop();
super.tearDown();
}
@@ -65,7 +67,10 @@ public void testLogOnElapsedTime() throws Exception {
}
throw new IllegalStateException("Cannot update isStart");
};
- TestEventHandler eventHandler = new TestEventHandler((e) -> {}, () -> null, timeSupplier);
+ final ThreadPool threadPool = mock(ThreadPool.class);
+ doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos();
+ TestEventHandler eventHandler =
+ new TestEventHandler((e) -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool));
ServerChannelContext serverChannelContext = mock(ServerChannelContext.class);
SocketChannelContext socketChannelContext = mock(SocketChannelContext.class);
@@ -91,7 +96,7 @@ public void testLogOnElapsedTime() throws Exception {
for (Map.Entry<String, CheckedRunnable<Exception>> entry : tests.entrySet()) {
String message = "*Slow execution on network thread*";
MockLogAppender.LoggingExpectation slowExpectation =
- new MockLogAppender.SeenEventExpectation(entry.getKey(), TestEventHandler.class.getCanonicalName(), Level.WARN, message);
+ new MockLogAppender.SeenEventExpectation(entry.getKey(), MockNioTransport.class.getCanonicalName(), Level.WARN, message);
appender.addExpectation(slowExpectation);
entry.getValue().run();
appender.assertAllExpectationsMatched();
From 4a9438762a562d20e938d2ea82538805f33e85b1 Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Wed, 22 May 2019 14:56:14 +0100
Subject: [PATCH 044/224] Mute Data Frame integration tests
Relates to https://github.com/elastic/elasticsearch/issues/42344
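
For reference, the muting convention applied below, as a compilable sketch (the class
name is made up; @AwaitsFix comes from the Lucene test framework): a method-level
annotation mutes a single test, while the class-level form used in most of these
suites mutes every test in the class until the linked issue is resolved.

    import org.apache.lucene.util.LuceneTestCase;

    @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
    public class ExampleMutedIT extends LuceneTestCase {
        public void testAnything() {
            // skipped: the runner sees the class-level annotation and never executes this
        }
    }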
---
.../xpack/dataframe/integration/DataFrameTransformIT.java | 1 +
.../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 ++
.../dataframe/integration/DataFrameConfigurationIndexIT.java | 2 ++
.../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 ++
.../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 ++
.../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 ++
.../xpack/dataframe/integration/DataFrameTaskFailedStateIT.java | 2 ++
.../dataframe/integration/DataFrameTransformProgressIT.java | 2 ++
.../xpack/dataframe/integration/DataFrameUsageIT.java | 2 ++
9 files changed, 17 insertions(+)
diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java
index ecb2025c6a9c5..cc2e8c4436e06 100644
--- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java
+++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java
@@ -28,6 +28,7 @@ public void cleanTransforms() {
cleanUp();
}
+ @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public void testDataFrameTransformCrud() throws Exception {
createReviewsIndex();
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java
index 9884c9bb6793b..7dc79c1ae8fbe 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex;
import org.junit.Before;
@@ -22,6 +23,7 @@
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameAuditorIT extends DataFrameRestTestCase {
private static final String TEST_USER_NAME = "df_admin_plus_data";
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java
index 681599331c8af..d7e12cf2bee4d 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java
@@ -8,6 +8,7 @@
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
@@ -22,6 +23,7 @@
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase {
/**
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
index d9927cd09ed8f..9bac6ca0b4049 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.xpack.core.dataframe.DataFrameField;
@@ -21,6 +22,7 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase {
private static final String TEST_USER_NAME = "df_user";
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java
index 26a957ea055c2..5b95d1daead53 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
@@ -15,6 +16,7 @@
import java.io.IOException;
import java.util.Map;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameMetaDataIT extends DataFrameRestTestCase {
private boolean indicesCreated = false;
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
index 770eaec7bd141..dab7e819881d2 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.junit.Before;
@@ -21,6 +22,7 @@
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFramePivotRestIT extends DataFrameRestTestCase {
private static final String TEST_USER_NAME = "df_admin_plus_data";
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java
index 96aeeda8755f4..7b63644dd34ad 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.rest.RestStatus;
@@ -19,6 +20,7 @@
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.Matchers.equalTo;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase {
public void testDummy() {
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java
index 194d35e8ba636..7d0fb179a2228 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
@@ -45,6 +46,7 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameTransformProgressIT extends ESIntegTestCase {
protected void createReviewsIndex() throws Exception {
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java
index 4f209c5a9f3f4..f98fa6a271365 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.dataframe.integration;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -22,6 +23,7 @@
import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE;
import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS;
+@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class DataFrameUsageIT extends DataFrameRestTestCase {
private boolean indicesCreated = false;
From a568c3c5dac681fc93cff6c64204c3d00b3c1bb1 Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Wed, 22 May 2019 15:35:08 +0100
Subject: [PATCH 045/224] [ML Data Frame] Persist data frame after state
changes (#42347)
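The indexer previously cached a previouslyPersistedStats copy and skipped the write when nothing had changed; after this change it persists DataFrameTransformStateAndStats unconditionally on every doSaveState, trading a few redundant writes for never missing a state change. A condensed sketch of the resulting flow (paraphrased from the diff below, not verbatim):

    transformsConfigManager.putOrUpdateTransformStats(
        new DataFrameTransformStateAndStats(transformId, state, getStats(),
            DataFrameTransformCheckpointingInfo.EMPTY),
        ActionListener.wrap(
            r -> next.run(),   // stats persisted, resume indexing
            e -> {             // persistence failures are logged, not fatal
                logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", e);
                next.run();
            }));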
---
.../transforms/DataFrameTransformTask.java | 28 +++++++------------
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
index 9df6b5e3ab337..926f233c454d1 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
@@ -444,7 +444,6 @@ static class ClientDataFrameIndexer extends DataFrameIndexer {
private final DataFrameTransformsCheckpointService transformsCheckpointService;
private final String transformId;
private final DataFrameTransformTask transformTask;
- private volatile DataFrameIndexerTransformStats previouslyPersistedStats = null;
private final AtomicInteger failureCount;
// Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index
private volatile String lastAuditedExceptionMessage = null;
@@ -552,25 +551,18 @@ protected void doSaveState(IndexerState indexerState, Map<String, Object> positi
// only every-so-often when doing the bulk indexing calls. See AsyncTwoPhaseIndexer#onBulkResponse for current periodicity
ActionListener<PersistentTask<?>> updateClusterStateListener = ActionListener.wrap(
task -> {
- // Only persist the stats if something has actually changed
- if (previouslyPersistedStats == null || previouslyPersistedStats.equals(getStats()) == false) {
- transformsConfigManager.putOrUpdateTransformStats(
- new DataFrameTransformStateAndStats(transformId, state, getStats(),
- DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null
+ transformsConfigManager.putOrUpdateTransformStats(
+ new DataFrameTransformStateAndStats(transformId, state, getStats(),
+ DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null
ActionListener.wrap(
- r -> {
- previouslyPersistedStats = getStats();
- next.run();
- },
- statsExc -> {
- logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc);
- next.run();
- }
+ r -> {
+ next.run();
+ },
+ statsExc -> {
+ logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc);
+ next.run();
+ }
));
- // The stats that we have previously written to the doc is the same as as it is now, no need to update it
- } else {
- next.run();
- }
},
exc -> {
logger.error("Updating persistent state of transform [" + transformConfig.getId() + "] failed", exc);
From c1d980cf3a37ae803d2e2ef6d87450039bf0ff7c Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 17:31:59 +0200
Subject: [PATCH 046/224] Fix testAutoFollowManyIndices
On a slow CI worker, the test was failing an assertion.
Closes #41234
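The fix below simply gives assertBusy more headroom. For reference, a minimal sketch of the pattern (assuming a test class extending ESTestCase; currentCount and expectedCount are hypothetical stand-ins):

    // assertBusy retries the block until the assertions pass; the default
    // ceiling is 10 seconds, and the (long, TimeUnit) overload raises it,
    // which is exactly what this patch does.
    public void testEventuallyConsistent() throws Exception {
        assertBusy(() -> {
            assertThat(currentCount(), equalTo(expectedCount));
        }, 30, TimeUnit.SECONDS);
    }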
---
.../java/org/elasticsearch/xpack/ccr/AutoFollowIT.java | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java
index 4fdb1fa00ab9a..0bcb3daac6284 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java
@@ -31,6 +31,7 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
@@ -149,7 +150,7 @@ public void testAutoFollowManyIndices() throws Exception {
AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE);
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1));
assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1));
- });
+ }, 30, TimeUnit.SECONDS);
} catch (AssertionError ae) {
logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class)));
logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0]));
@@ -168,7 +169,7 @@ public void testAutoFollowManyIndices() throws Exception {
AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE);
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue());
assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0));
- });
+ }, 30, TimeUnit.SECONDS);
} catch (AssertionError ae) {
logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class)));
logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0]));
@@ -199,7 +200,7 @@ public void testAutoFollowManyIndices() throws Exception {
// Ensure that there are no auto follow errors:
// (added specifically to see that there are no leader indices auto followed multiple times)
assertThat(autoFollowStats[0].getRecentAutoFollowErrors().size(), equalTo(0));
- });
+ }, 30, TimeUnit.SECONDS);
} catch (AssertionError ae) {
logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class)));
logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0]));
From 145c3bec7898f9a4e9bb43ade48d9103d8e30d88 Mon Sep 17 00:00:00 2001
From: Mengwei Ding
Date: Wed, 22 May 2019 08:53:25 -0700
Subject: [PATCH 047/224] Add .code_internal-* index pattern to kibana user
(#42247)
---
.../xpack/core/security/authz/store/ReservedRolesStore.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
index 2c86971b529f9..49d4159f13968 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
@@ -118,8 +118,9 @@ private static Map<String, RoleDescriptor> initializeReservedRoles() {
.indices(".monitoring-*").privileges("read", "read_cross_cluster").build(),
RoleDescriptor.IndicesPrivileges.builder()
.indices(".management-beats").privileges("create_index", "read", "write").build(),
+ // .code_internal-* is for Code's internal worker queue index creation.
RoleDescriptor.IndicesPrivileges.builder()
- .indices(".code-*").privileges("all").build(),
+ .indices(".code-*", ".code_internal-*").privileges("all").build(),
},
null,
new ConditionalClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) },
From d5888b23d73a245f40fa124a39d474b34c042156 Mon Sep 17 00:00:00 2001
From: mushao999
Date: Thu, 23 May 2019 00:05:48 +0800
Subject: [PATCH 048/224] Fix alpha version error message (#40406)
---
server/src/main/java/org/elasticsearch/Version.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 5089a7fe0cec9..ce0fc1559c18b 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -257,7 +257,7 @@ public static Version fromString(String version) {
if (buildStr.startsWith("alpha")) {
assert rawMajor >= 5 : "major must be >= 5 but was " + major;
build = Integer.parseInt(buildStr.substring(5));
- assert build < 25 : "expected a beta build but " + build + " >= 25";
+ assert build < 25 : "expected an alpha build but " + build + " >= 25";
} else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
build = betaOffset + Integer.parseInt(buildStr.substring(4));
assert build < 50 : "expected a beta build but " + build + " >= 50";
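An aside on the magic numbers, since the message is terse: a hedged sketch of how a pre-release suffix maps to the numeric build (ranges inferred from the assertions above; in the surrounding code betaOffset is 25 for major >= 5):

    // Hypothetical helper mirroring the parsing above, for illustration only.
    static int preReleaseBuild(String buildStr) {
        if (buildStr.startsWith("alpha")) {
            return Integer.parseInt(buildStr.substring(5));      // alphas: 0..24
        } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
            return 25 + Integer.parseInt(buildStr.substring(4)); // betas: 25..49
        }
        throw new IllegalArgumentException("unknown pre-release suffix: " + buildStr);
    }
    // e.g. "alpha1" -> 1 and "beta1" -> 26, hence an alpha build must be < 25.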
From 148df31639a983058b758f5eef2c9df2f9346e94 Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Wed, 22 May 2019 09:19:14 -0700
Subject: [PATCH 049/224] Fix a rendering issue in the geo envelope docs.
(#42332)
Previously the formatting information didn't display in the docs, and the
sentence just rendered as "bounding rectangle in the format :".
---
docs/reference/mapping/types/geo-shape.asciidoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc
index 1cf85e305a95d..26f59e1058c09 100644
--- a/docs/reference/mapping/types/geo-shape.asciidoc
+++ b/docs/reference/mapping/types/geo-shape.asciidoc
@@ -615,7 +615,7 @@ POST /example/_doc
Elasticsearch supports an `envelope` type, which consists of coordinates
for upper left and lower right points of the shape to represent a
-bounding rectangle in the format [[minLon, maxLat],[maxLon, minLat]]:
+bounding rectangle in the format `[[minLon, maxLat], [maxLon, minLat]]`:
[source,js]
--------------------------------------------------
From 943344fa48d8d1f83776863250af8b8fb52417fd Mon Sep 17 00:00:00 2001
From: swstepp <49322243+swstepp@users.noreply.github.com>
Date: Wed, 22 May 2019 10:44:41 -0600
Subject: [PATCH 050/224] Fix grammar problem in stemming reference. (#42148)
---
docs/reference/how-to/recipes/stemming.asciidoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc
index e8c213646578c..d7ddda116327e 100644
--- a/docs/reference/how-to/recipes/stemming.asciidoc
+++ b/docs/reference/how-to/recipes/stemming.asciidoc
@@ -171,7 +171,7 @@ the query need to be matched exactly while other parts should still take
stemming into account?
Fortunately, the `query_string` and `simple_query_string` queries have a feature
-that solve this exact problem: `quote_field_suffix`. This tell Elasticsearch
+that solves this exact problem: `quote_field_suffix`. This tells Elasticsearch
that the words that appear in between quotes are to be redirected to a different
field, see below:
From 458aa6409f37ea636c1b099c99ff4369599cb17c Mon Sep 17 00:00:00 2001
From: Jake Landis
Date: Wed, 22 May 2019 11:58:50 -0500
Subject: [PATCH 051/224] add 7_3 as version (#42368)
---
server/src/main/java/org/elasticsearch/Version.java | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index ce0fc1559c18b..e3381a3384c0e 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -92,6 +92,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final int V_7_2_0_ID = 7020099;
public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
+ public static final int V_7_3_0_ID = 7030099;
+ public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final int V_8_0_0_ID = 8000099;
public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0);
public static final Version CURRENT = V_8_0_0;
@@ -110,6 +112,8 @@ public static Version fromId(int id) {
switch (id) {
case V_8_0_0_ID:
return V_8_0_0;
+ case V_7_3_0_ID:
+ return V_7_3_0;
case V_7_2_0_ID:
return V_7_2_0;
case V_7_1_1_ID:
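For orientation, the id constants above follow a packed decimal scheme that can be inferred from the existing values (V_7_2_0_ID = 7020099 and the new V_7_3_0_ID = 7030099); the decoding below is illustrative, not a public API:

    int id = 7_030_099;                // V_7_3_0_ID from the hunk above
    int major = id / 1_000_000;        // 7
    int minor = (id / 10_000) % 100;   // 3
    int revision = (id / 100) % 100;   // 0
    int build = id % 100;              // 99 marks a GA release build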
From d49d9b53d6e0ac8acda61913489fa55e5118f0c5 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 22 May 2019 19:07:56 +0200
Subject: [PATCH 052/224] Ensure testAckedIndexing uses disruption index
settings
AbstractDisruptionTestCase sets a lower global checkpoint sync interval, but this setting was ignored by
testAckedIndexing, which led to spurious test failures
Relates #41068, #38931
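The essence of the fix, sketched (it mirrors the test diff below): build the index settings on top of the suite-wide defaults so the disruption-specific settings survive.

    assertAcked(prepareCreate("test")
        .setSettings(Settings.builder()
            .put(indexSettings()) // suite defaults, previously dropped
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))));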
---
.../indices/recovery/PeerRecoveryTargetService.java | 2 +-
.../java/org/elasticsearch/discovery/ClusterDisruptionIT.java | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index 1ba854fdb2b13..6b1a893667f2c 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -356,7 +356,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove
logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode());
} else {
logger.trace(
- "{} preparing for sequence-number-based recovery starting at local checkpoint [{}] from [{}]",
+ "{} preparing for sequence-number-based recovery starting at sequence number [{}] from [{}]",
recoveryTarget.shardId(),
startingSeqNo,
recoveryTarget.sourceNode());
diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index ad3b8006ed0c3..5bc5efc96c661 100644
--- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -119,6 +119,7 @@ public void testAckedIndexing() throws Exception {
assertAcked(prepareCreate("test")
.setSettings(Settings.builder()
+ .put(indexSettings())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
));
From 35c4c9efb0705e1e6b080d16d78ed0c5967b80e6 Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Tue, 21 May 2019 10:25:23 -0400
Subject: [PATCH 053/224] Re-mute all ml_datafeed_crud rolling upgrade tests
AwaitsFix https://github.com/elastic/elasticsearch/issues/42258
Thought this was fixed, but it is throwing deprecation warnings at
an unexpected time, so putting this back on mute until we
figure it out.
---
.../rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml | 5 +++++
.../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 5 +++++
.../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ++++
3 files changed, 14 insertions(+)
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
index 2ff9b08e9b13f..4d2254a1ba8c3 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml
@@ -1,3 +1,8 @@
+setup:
+ - skip:
+ version: "all"
+ reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
+
---
"Test old cluster datafeed without aggs":
- do:
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
index 4918dde9ba899..62a9d33a511e6 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml
@@ -1,3 +1,8 @@
+setup:
+ - skip:
+ version: "all"
+ reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
+
---
"Put job and datafeed without aggs in old cluster":
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
index 5dc71ecb0679e..4b742e10de61f 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
@@ -1,4 +1,8 @@
setup:
+ - skip:
+ version: "all"
+ reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258"
+
- do:
cluster.health:
wait_for_status: green
From c9d04ccb3a13eeaccdf056df0243477d06da013d Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Wed, 22 May 2019 22:00:51 +0300
Subject: [PATCH 054/224] Make packer cache branches explicit (#41990)
Before this change we would recurse to cache bwc versions.
This proved problematic because the sheer number of steps it
generated took too long.
It also required tricky maintenance to break the recursion for old
branches we don't really care about.
With this change we now cache specific branches only.
---
.ci/packer_cache.sh | 1 +
distribution/bwc/build.gradle | 7 +++++--
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh
index 4533213920c3a..adc4f80d4960d 100755
--- a/.ci/packer_cache.sh
+++ b/.ci/packer_cache.sh
@@ -22,3 +22,4 @@ export JAVA8_HOME="${HOME}"/.java/java8
export JAVA11_HOME="${HOME}"/.java/java11
export JAVA12_HOME="${HOME}"/.java/openjdk12
./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies
+
diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle
index 87644fb7f6785..1a4e4161418ab 100644
--- a/distribution/bwc/build.gradle
+++ b/distribution/bwc/build.gradle
@@ -239,12 +239,15 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased
createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact)
}
-
createRunBwcGradleTask("resolveAllBwcDependencies") {
args 'resolveAllDependencies'
}
- resolveAllDependencies.dependsOn resolveAllBwcDependencies
+ Version currentVersion = Version.fromString(version)
+ if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) {
+ // We only want to resolve dependencies for live versions of master, without cascading this to older versions
+ resolveAllDependencies.dependsOn resolveAllBwcDependencies
+ }
for (e in artifactFiles) {
String projectName = e.key
From da77b97c56c948fea5909e60170a1680c791ce1b Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Thu, 23 May 2019 08:42:06 +0100
Subject: [PATCH 055/224] [ML Data Frame] Account for completed data frames in
test (#42351)
When asserting on the checkpoint value: if the data frame has completed, the checkpoint will be 1, otherwise 0.
Similarly, the state may be either started or indexing. Closes #42309
---
.../rest-api-spec/test/data_frame/transforms_stats.yml | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
index f552e4710c781..79aa14cb6f628 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
@@ -42,9 +42,6 @@ teardown:
---
"Test get transform stats":
- - skip:
- version: "all"
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309"
- do:
data_frame.get_data_frame_transform_stats:
transform_id: "airline-transform-stats"
@@ -52,7 +49,7 @@ teardown:
- match: { transforms.0.id: "airline-transform-stats" }
- match: { transforms.0.state.indexer_state: "/started|indexing/" }
- match: { transforms.0.state.task_state: "started" }
- - match: { transforms.0.state.checkpoint: 0 }
+ - lte: { transforms.0.state.checkpoint: 1 }
- lte: { transforms.0.stats.pages_processed: 1 }
- match: { transforms.0.stats.documents_processed: 0 }
- match: { transforms.0.stats.documents_indexed: 0 }
@@ -149,9 +146,6 @@ teardown:
---
"Test get multiple transform stats where one does not have a task":
- - skip:
- version: "all"
- reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309"
- do:
data_frame.put_data_frame_transform:
transform_id: "airline-transform-stats-dos"
@@ -169,7 +163,7 @@ teardown:
transform_id: "*"
- match: { count: 2 }
- match: { transforms.0.id: "airline-transform-stats" }
- - match: { transforms.0.state.indexer_state: "started" }
+ - match: { transforms.0.state.indexer_state: "/started|indexing/" }
- match: { transforms.1.id: "airline-transform-stats-dos" }
- match: { transforms.1.state.indexer_state: "stopped" }
From e75ff0c748e6b68232c2b08e19ac4a4934918264 Mon Sep 17 00:00:00 2001
From: Marios Trivyzas
Date: Thu, 23 May 2019 10:10:07 +0200
Subject: [PATCH 056/224] Allow `fields` to be set to `*` (#42301)
Allow SimpleQueryString, QueryString and MultiMatchQuery
to set the `fields` parameter to the wildcard `*`. If so, set
the leniency to `true`, to achieve the same behaviour as with the
`"default_field": "*"` setting.
Furthermore, check whether `*` is in the list of `default_field` values,
not necessarily as the first element.
Closes: #39577
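A usage sketch of what this enables (field names are placeholders; the builder calls are the standard query DSL):

    // `*` may now appear anywhere in the fields list, not only as the sole
    // entry; its presence turns leniency on (unless set explicitly), so a
    // text value run against, say, a numeric field becomes a MatchNoDocsQuery
    // disjunct instead of throwing a NumberFormatException.
    QueryBuilder qb = QueryBuilders.queryStringQuery("hello")
        .field("title")   // placeholder field
        .field("*");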
---
.../index/query/MultiMatchQueryBuilder.java | 15 +-
.../index/query/QueryStringQueryBuilder.java | 9 +-
.../index/query/SimpleQueryStringBuilder.java | 12 +-
.../index/search/QueryParserHelper.java | 8 ++
.../query/MultiMatchQueryBuilderTests.java | 128 ++++++++++++------
.../query/QueryStringQueryBuilderTests.java | 55 +++++++-
.../query/SimpleQueryStringBuilderTests.java | 79 ++++++++---
7 files changed, 227 insertions(+), 79 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
index 5537df2fdf874..7827c032ea0d7 100644
--- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
@@ -28,7 +28,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
@@ -783,18 +782,20 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
multiMatchQuery.setTranspositions(fuzzyTranspositions);
Map<String, Float> newFieldsBoosts;
+ boolean isAllField;
if (fieldsBoosts.isEmpty()) {
// no fields provided, defaults to index.query.default_field
List<String> defaultFields = context.defaultFields();
- boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0));
- if (isAllField && lenient == null) {
- // Sets leniency to true if not explicitly
- // set in the request
- multiMatchQuery.setLenient(true);
- }
newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields));
+ isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields);
} else {
newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, fieldsBoosts);
+ isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsBoosts.keySet());
+ }
+ if (isAllField && lenient == null) {
+ // Sets leniency to true if not explicitly
+ // set in the request
+ multiMatchQuery.setLenient(true);
}
return multiMatchQuery.parse(type, newFieldsBoosts, value, minimumShouldMatch);
}
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
index f129ccbec7254..1d1d139ceef1c 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -847,11 +847,14 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
}
} else if (fieldsAndWeights.size() > 0) {
final Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights);
- queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient);
+ if (QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet())) {
+ queryParser = new QueryStringQueryParser(context, resolvedFields, lenient == null ? true : lenient);
+ } else {
+ queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient);
+ }
} else {
List<String> defaultFields = context.defaultFields();
- boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0));
- if (isAllField) {
+ if (QueryParserHelper.hasAllFieldsWildcard(defaultFields)) {
queryParser = new QueryStringQueryParser(context, lenient == null ? true : lenient);
} else {
final Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context,
diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index bd74d34196345..beae19a4403ac 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -28,7 +28,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
-import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.search.QueryParserHelper;
@@ -399,16 +398,19 @@ public SimpleQueryStringBuilder fuzzyTranspositions(boolean fuzzyTranspositions)
protected Query doToQuery(QueryShardContext context) throws IOException {
Settings newSettings = new Settings(settings);
final Map<String, Float> resolvedFieldsAndWeights;
+ boolean isAllField;
if (fieldsAndWeights.isEmpty() == false) {
resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights);
+ isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet());
} else {
List<String> defaultFields = context.defaultFields();
- boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0));
- if (isAllField) {
- newSettings.lenient(lenientSet ? settings.lenient() : true);
- }
resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context,
QueryParserHelper.parseFieldsAndWeights(defaultFields));
+ isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields);
+ }
+
+ if (isAllField) {
+ newSettings.lenient(lenientSet ? settings.lenient() : true);
}
final SimpleQueryStringQueryParser sqp;
diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java
index adc1691608b23..3acf2929687c5 100644
--- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java
+++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java
@@ -161,4 +161,12 @@ private static void checkForTooManyFields(Map<String, Float> fields, QueryShardC
throw new IllegalArgumentException("field expansion matches too many fields, limit: " + limit + ", got: " + fields.size());
}
}
+
+ /**
+ * Returns true if any of the fields is the wildcard {@code *}, false otherwise.
+ * @param fields A collection of field names
+ */
+ public static boolean hasAllFieldsWildcard(Collection<String> fields) {
+ return fields.stream().anyMatch(Regex::isMatchAllPattern);
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
index ab9b3c732135d..6590a5609353a 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
@@ -55,6 +55,7 @@
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
@@ -409,52 +410,79 @@ public void testToFuzzyQuery() throws Exception {
public void testDefaultField() throws Exception {
QueryShardContext context = createShardContext();
MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("hello");
- // should pass because we set lenient to true when default field is `*`
+ // default value `*` sets leniency to true
Query query = builder.toQuery(context);
- assertThat(query, instanceOf(DisjunctionMaxQuery.class));
-
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(),
- Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5")
- .build())
- );
-
- MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello");
- query = qb.toQuery(context);
- DisjunctionMaxQuery expected = new DisjunctionMaxQuery(
- Arrays.asList(
- new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
- new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f)
- ), 0.0f
- );
- assertEquals(expected, query);
+ assertQueryWithAllFieldsWildcard(query);
+
+ try {
+ // `*` is in the list of the default_field => leniency set to true
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build())
+ );
+ query = new MultiMatchQueryBuilder("hello")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(),
+ Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5")
+ .build())
+ );
+ MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello");
+ query = qb.toQuery(context);
+ DisjunctionMaxQuery expected = new DisjunctionMaxQuery(
+ Arrays.asList(
+ new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f)
+ ), 0.0f
+ );
+ assertEquals(expected, query);
+
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(),
+ Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build())
+ );
+ // should fail because lenient defaults to false
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context));
+ assertThat(exc, instanceOf(NumberFormatException.class));
+ assertThat(exc.getMessage(), equalTo("For input string: \"hello\""));
+
+ // explicitly sets lenient
+ qb.lenient(true);
+ query = qb.toQuery(context);
+ expected = new DisjunctionMaxQuery(
+ Arrays.asList(
+ new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f),
+ new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]")
+ ), 0.0f
+ );
+ assertEquals(expected, query);
+
+ } finally {
+ // Reset to the default value
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(),
+ Settings.builder().putNull("index.query.default_field").build())
+ );
+ }
+ }
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(),
- Settings.builder().putList("index.query.default_field",
- STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build())
- );
- // should fail because lenient defaults to false
- IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context));
- assertThat(exc, instanceOf(NumberFormatException.class));
- assertThat(exc.getMessage(), equalTo("For input string: \"hello\""));
-
- // explicitly sets lenient
- qb.lenient(true);
- query = qb.toQuery(context);
- expected = new DisjunctionMaxQuery(
- Arrays.asList(
- new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
- new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f),
- new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]")
- ), 0.0f
- );
- assertEquals(expected, query);
+ public void testAllFieldsWildcard() throws Exception {
+ QueryShardContext context = createShardContext();
+ Query query = new MultiMatchQueryBuilder("hello")
+ .field("*")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(),
- Settings.builder().putNull("index.query.default_field").build())
- );
+ query = new MultiMatchQueryBuilder("hello")
+ .field(STRING_FIELD_NAME)
+ .field("*")
+ .field(STRING_FIELD_NAME_2)
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
}
public void testWithStopWords() throws Exception {
@@ -536,4 +564,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings
.build();
return IndexMetaData.builder(name).settings(build).build();
}
+
+ private void assertQueryWithAllFieldsWildcard(Query query) {
+ assertEquals(DisjunctionMaxQuery.class, query.getClass());
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query;
+ int noMatchNoDocsQueries = 0;
+ for (Query q : disjunctionMaxQuery.getDisjuncts()) {
+ if (q.getClass() == MatchNoDocsQuery.class) {
+ noMatchNoDocsQueries++;
+ }
+ }
+ assertEquals(11, noMatchNoDocsQueries);
+ assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new TermQuery(new Term(STRING_FIELD_NAME_2, "hello"))));
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index 001df6deb5647..ee4e0f9540451 100644
--- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -79,6 +79,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
@@ -1255,12 +1256,27 @@ public void testUnmappedFieldRewriteToMatchNoDocs() throws IOException {
public void testDefaultField() throws Exception {
QueryShardContext context = createShardContext();
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
- STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
- );
+ // default value `*` sets leniency to true
+ Query query = new QueryStringQueryBuilder("hello")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
try {
- Query query = new QueryStringQueryBuilder("hello")
+ // `*` is in the list of the default_field => leniency set to true
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build())
+ );
+ query = new QueryStringQueryBuilder("hello")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
+
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
+ );
+ query = new QueryStringQueryBuilder("hello")
.toQuery(context);
Query expected = new DisjunctionMaxQuery(
Arrays.asList(
@@ -1278,6 +1294,21 @@ public void testDefaultField() throws Exception {
}
}
+ public void testAllFieldsWildcard() throws Exception {
+ QueryShardContext context = createShardContext();
+ Query query = new QueryStringQueryBuilder("hello")
+ .field("*")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
+ query = new QueryStringQueryBuilder("hello")
+ .field(STRING_FIELD_NAME)
+ .field("*")
+ .field(STRING_FIELD_NAME_2)
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+ }
+
/**
* the quote analyzer should overwrite any other forced analyzer in quoted parts of the query
*/
@@ -1513,4 +1544,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings
.build();
return IndexMetaData.builder(name).settings(build).build();
}
+
+ private void assertQueryWithAllFieldsWildcard(Query query) {
+ assertEquals(DisjunctionMaxQuery.class, query.getClass());
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query;
+ int noMatchNoDocsQueries = 0;
+ for (Query q : disjunctionMaxQuery.getDisjuncts()) {
+ if (q.getClass() == MatchNoDocsQuery.class) {
+ noMatchNoDocsQueries++;
+ }
+ }
+ assertEquals(11, noMatchNoDocsQueries);
+ assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new TermQuery(new Term(STRING_FIELD_NAME_2, "hello"))));
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
index 0adac9db8287e..ab479d89fe9d6 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
@@ -56,6 +56,7 @@
import java.util.Set;
import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
@@ -576,24 +577,56 @@ public void testQuoteFieldSuffix() {
public void testDefaultField() throws Exception {
QueryShardContext context = createShardContext();
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
- STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
- );
+ // default value `*` sets leniency to true
Query query = new SimpleQueryStringBuilder("hello")
.toQuery(context);
- Query expected = new DisjunctionMaxQuery(
- Arrays.asList(
- new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
- new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f)
- ), 1.0f
- );
- assertEquals(expected, query);
- // Reset the default value
- context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index",
- context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build())
- );
+ assertQueryWithAllFieldsWildcard(query);
+
+ try {
+ // `*` is in the list of the default_field => leniency set to true
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build())
+ );
+ query = new SimpleQueryStringBuilder("hello")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
+ );
+ query = new SimpleQueryStringBuilder("hello")
+ .toQuery(context);
+ Query expected = new DisjunctionMaxQuery(
+ Arrays.asList(
+ new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f)
+ ), 1.0f
+ );
+ assertEquals(expected, query);
+ } finally {
+ // Reset to the default value
+ context.getIndexSettings().updateIndexMetaData(
+ newIndexMeta("index",
+ context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build())
+ );
+ }
+ }
+
+ public void testAllFieldsWildcard() throws Exception {
+ QueryShardContext context = createShardContext();
+ Query query = new SimpleQueryStringBuilder("hello")
+ .field("*")
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
+
+ query = new SimpleQueryStringBuilder("hello")
+ .field(STRING_FIELD_NAME)
+ .field("*")
+ .field(STRING_FIELD_NAME_2)
+ .toQuery(context);
+ assertQueryWithAllFieldsWildcard(query);
}
public void testToFuzzyQuery() throws Exception {
@@ -739,4 +772,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings
.build();
return IndexMetaData.builder(name).settings(build).build();
}
+
+ private void assertQueryWithAllFieldsWildcard(Query query) {
+ assertEquals(DisjunctionMaxQuery.class, query.getClass());
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query;
+ int noMatchNoDocsQueries = 0;
+ for (Query q : disjunctionMaxQuery.getDisjuncts()) {
+ if (q.getClass() == MatchNoDocsQuery.class) {
+ noMatchNoDocsQueries++;
+ }
+ }
+ assertEquals(11, noMatchNoDocsQueries);
+ assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
+ new TermQuery(new Term(STRING_FIELD_NAME_2, "hello"))));
+ }
}
From f5a6aa7ad7bba178b7ac1d5aa34fac84ab097b43 Mon Sep 17 00:00:00 2001
From: markharwood
Date: Thu, 23 May 2019 10:34:13 +0100
Subject: [PATCH 057/224] Test fix - results equality failed because of subtle
scoring differences between replicas. (#42366)
Diverging merge policies mean the segments, and therefore the scores, are not the same.
Fixed the test by ensuring there are zero replicas.
Closes #32492
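The fix in miniature (it mirrors the diff below): pin the test index to a single shard with zero replicas so profiled and unprofiled searches score against identical segments.

    createIndex("test", Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", 0).build());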
---
.../elasticsearch/search/profile/query/QueryProfilerIT.java | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
index 664f5a09fa947..040e16b6e957f 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
@@ -26,6 +26,7 @@
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
@@ -110,7 +111,9 @@ public void testProfileQuery() throws Exception {
* to make sure the profiling doesn't interfere with the hits being returned
*/
public void testProfileMatchesRegular() throws Exception {
- createIndex("test");
+ createIndex("test", Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0).build());
ensureGreen();
int numDocs = randomIntBetween(100, 150);
From 4e999d7514e701c7cf5790e9b484d9f8d7b83297 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Thu, 23 May 2019 11:41:05 +0200
Subject: [PATCH 058/224] Upgrade to Lucene 8.1.0 (#42214)
This commit upgrades to the GA release of Lucene 8.1.0
---
buildSrc/version.properties | 2 +-
...ene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-expressions-8.1.0.jar.sha1 | 1 +
...e-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-icu-8.1.0.jar.sha1 | 1 +
...lyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 | 1 +
...-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-nori-8.1.0.jar.sha1 | 1 +
...lyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 | 1 +
...alyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 | 1 +
...alyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 | 1 +
...zers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../lucene-analyzers-morfologik-8.1.0.jar.sha1 | 1 +
...nalyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../licenses/lucene-analyzers-common-8.1.0.jar.sha1 | 1 +
...backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 | 1 +
.../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-core-8.1.0.jar.sha1 | 1 +
...lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-grouping-8.1.0.jar.sha1 | 1 +
...ene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-highlighter-8.1.0.jar.sha1 | 1 +
.../lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-join-8.1.0.jar.sha1 | 1 +
.../lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-memory-8.1.0.jar.sha1 | 1 +
.../lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-misc-8.1.0.jar.sha1 | 1 +
.../lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-queries-8.1.0.jar.sha1 | 1 +
...ene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-queryparser-8.1.0.jar.sha1 | 1 +
.../lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-sandbox-8.1.0.jar.sha1 | 1 +
.../lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-spatial-8.1.0.jar.sha1 | 1 +
...-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 | 1 +
...ucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-spatial3d-8.1.0.jar.sha1 | 1 +
.../lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
server/licenses/lucene-suggest-8.1.0.jar.sha1 | 1 +
.../org/elasticsearch/index/codec/CodecService.java | 3 +--
.../codec/PerFieldMappingPostingFormatCodec.java | 3 +--
.../index/engine/InternalEngineTests.java | 11 -----------
.../elasticsearch/validate/SimpleValidateQueryIT.java | 4 ++--
.../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 -
.../sql-action/licenses/lucene-core-8.1.0.jar.sha1 | 1 +
53 files changed, 29 insertions(+), 42 deletions(-)
delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1
delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-analyzers-common-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-backward-codecs-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-core-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-grouping-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-highlighter-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-join-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-memory-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-misc-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-queries-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-queryparser-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-sandbox-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-spatial-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-spatial-extras-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-spatial3d-8.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 server/licenses/lucene-suggest-8.1.0.jar.sha1
delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 471cb3a705cf5..a3214c789a47d 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
elasticsearch = 8.0.0
-lucene = 8.1.0-snapshot-e460356abe
+lucene = 8.1.0
bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691
diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 48446e877e309..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0a1addebde14147501b7d24a581a7a7288bc585d
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..2554e8ce52652
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1
@@ -0,0 +1 @@
+0c98e3b9d25f27ab05ac643cfb28756daa516bc7
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index c03380c6cf36c..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b8b7d744e6294706b379ec7fdd2d6f1b6badc95b
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..e4657681667f1
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1
@@ -0,0 +1 @@
+d61364290eb1c28696e62b7df3a7d041d3be2fa5
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index e3195509e493f..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c9dcc5568ccd4589f4a6871d2019661546106c83
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..fff37598a0861
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1
@@ -0,0 +1 @@
+7f78b18890a0a0e74a8249806a6cfcabd2fae304
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 77cd0b32ed9ea..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bef6d901a9c8b4c6139de7419b3024e0c9fd4ad3
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..47b0c633fdc79
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1
@@ -0,0 +1 @@
+bfc6b5d67a792aa23ee134fe93307696aad94223
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 1f090e9ca523f..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-074c06d6f2254edae04acdd53bf6989a4343acc8
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..d24096b883fc9
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1
@@ -0,0 +1 @@
+6fac1ff799b86f872b67e7fad55120d338daa86f
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 42a1e22cdfbc0..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5cd2a341ab4524ec7ff40ba29faa4ead5e805413
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..9ed51a53f6226
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1
@@ -0,0 +1 @@
+72941af5e1bfb012aec04dd518a2deb43402702c
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index c2468bbdd7cac..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ba55aba7d278f6201b4ebd6dafbc7edb6fe94f8c
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..190a7031928b8
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1
@@ -0,0 +1 @@
+0ac885595cfdc0267d7d9cb843c22dabf7215ff0
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 176e9533edde9..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-543d99fd2ba4302f3555792236350b201514d821
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..7f2d4c5e8647e
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1
@@ -0,0 +1 @@
+e260cff7f48e350e1ec037dec1c260ce05ddb53e
\ No newline at end of file
diff --git a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 08507536ac134..0000000000000
--- a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c20a8ae0c3bd769aa6c415ebea94ba466d9a631d
\ No newline at end of file
diff --git a/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..6eb7722fec744
--- /dev/null
+++ b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1
@@ -0,0 +1 @@
+262f20cb2786cdf7015a4ba1a64ce90ff2d746f5
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 6b0a3854c6f38..0000000000000
--- a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6e8921ab37facdcc5c4b71f2612d72300d6de217
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..c232e0fbdfdb9
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1
@@ -0,0 +1 @@
+c5610306f8eff182b399b9aed7a60b82668a8395
\ No newline at end of file
diff --git a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index fea3658cf61bd..0000000000000
--- a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3e85f77d8f8ed1db53dba387fbdec55a9f912639
\ No newline at end of file
diff --git a/server/licenses/lucene-core-8.1.0.jar.sha1 b/server/licenses/lucene-core-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..4a6aa7b098686
--- /dev/null
+++ b/server/licenses/lucene-core-8.1.0.jar.sha1
@@ -0,0 +1 @@
+46d614acdeb42f4661e91347100217bc72aae11e
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 0bcc1ebab16de..0000000000000
--- a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-426a1822d888a6341f6bafccaad19e4a2ad88e25
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-8.1.0.jar.sha1 b/server/licenses/lucene-grouping-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..f3c49cb193aba
--- /dev/null
+++ b/server/licenses/lucene-grouping-8.1.0.jar.sha1
@@ -0,0 +1 @@
+443f63d9038eea0601b493fa37fc599d74b035eb
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index b2478a52c7a85..0000000000000
--- a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f83fa4b264198dfb12436a803309a60f5588481d
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-8.1.0.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..6b174859e1834
--- /dev/null
+++ b/server/licenses/lucene-highlighter-8.1.0.jar.sha1
@@ -0,0 +1 @@
+e3e52591f8d44a4e1006ced4dd4a67f7a572990a
\ No newline at end of file
diff --git a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index ea3f6353ce09e..0000000000000
--- a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f381131abef51f77d26bccbb213d1c8563c19ec4
\ No newline at end of file
diff --git a/server/licenses/lucene-join-8.1.0.jar.sha1 b/server/licenses/lucene-join-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..75232f1fc0a72
--- /dev/null
+++ b/server/licenses/lucene-join-8.1.0.jar.sha1
@@ -0,0 +1 @@
+2e885b1e3e55f94ccc2744f85738563a577a4e21
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 0bc96c932c18b..0000000000000
--- a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8d8733551b9eb71e1f59688b8e78e0b481974d7a
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-8.1.0.jar.sha1 b/server/licenses/lucene-memory-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..4b2c65af32da5
--- /dev/null
+++ b/server/licenses/lucene-memory-8.1.0.jar.sha1
@@ -0,0 +1 @@
+e58d0092da1c4744627d57d022f4e07d8b80d11b
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index fdde3da39a264..0000000000000
--- a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-13da0b22f01dff4a01c9907425464a440695104b
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-8.1.0.jar.sha1 b/server/licenses/lucene-misc-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..37afcfadb7e12
--- /dev/null
+++ b/server/licenses/lucene-misc-8.1.0.jar.sha1
@@ -0,0 +1 @@
+07833aee2c5feb6fa1a16a21d27c8f15c01d0b4c
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index c50232482b5c1..0000000000000
--- a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c3de4dbb98b5cc00875d76e817929374bb9e710
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-8.1.0.jar.sha1 b/server/licenses/lucene-queries-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..7f09849b67693
--- /dev/null
+++ b/server/licenses/lucene-queries-8.1.0.jar.sha1
@@ -0,0 +1 @@
+63096d40298b8b8245a602d344b57bfa14b929fd
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 4a6c53845fc24..0000000000000
--- a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-539ef199c74ae6891ac93f55632fe140b9d4c291
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-8.1.0.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..ada3ec974e031
--- /dev/null
+++ b/server/licenses/lucene-queryparser-8.1.0.jar.sha1
@@ -0,0 +1 @@
+9bb4fb3c7035a877e4a87ed86870894509d26d65
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 198b93230fb7c..0000000000000
--- a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0371141f658e2157babd490f0a8ddbcd5114b371
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-8.1.0.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..422195c73c69d
--- /dev/null
+++ b/server/licenses/lucene-sandbox-8.1.0.jar.sha1
@@ -0,0 +1 @@
+1033737c97703516134ba4c99d41724729854df4
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index ad6558f167d1c..0000000000000
--- a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1bae56fbce29d6c597c00889dab1909f51f4aaac
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..e0d8f362a1ecf
--- /dev/null
+++ b/server/licenses/lucene-spatial-8.1.0.jar.sha1
@@ -0,0 +1 @@
+968d2fb35b0c2e68ac07c1ec187ab38a74b6602a
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 66d5cc808a1ac..0000000000000
--- a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6eaed1dea9a18502ab9dffe55f081da6060373f7
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..0a45cfe117a3a
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1
@@ -0,0 +1 @@
+551b7fa327645d3fd59ae1321320153b2f858766
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 0e1c69171e07e..0000000000000
--- a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e54c6be78275637544a3080874dd04b0d92755e5
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..9cdde5a308e22
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1
@@ -0,0 +1 @@
+45e63df708be458e95d9da3e6054189c50c30dff
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index 2d1491c40dd0d..0000000000000
--- a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e4c95d0bb740f18af520faebcebb968da3e0a687
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-8.1.0.jar.sha1 b/server/licenses/lucene-suggest-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..c4ac6e68080ab
--- /dev/null
+++ b/server/licenses/lucene-suggest-8.1.0.jar.sha1
@@ -0,0 +1 @@
+d5cd0e619b473e132f03e3577d1b422f050f99c0
\ No newline at end of file
diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java
index c43f733f916cb..485c40d5d9bbd 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java
@@ -21,7 +21,6 @@
import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene80.Lucene80Codec;
import org.elasticsearch.common.Nullable;
@@ -49,7 +48,7 @@ public CodecService(@Nullable MapperService mapperService, Logger logger) {
final var codecs = new HashMap<String, Codec>();
if (mapperService == null) {
codecs.put(DEFAULT_CODEC, new Lucene80Codec());
- codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION, FSTLoadMode.AUTO));
+ codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION));
} else {
codecs.put(DEFAULT_CODEC,
new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
index 705141f1fb925..4a154abd8eadd 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
@@ -22,7 +22,6 @@
import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.codecs.lucene80.Lucene80Codec;
import org.elasticsearch.common.lucene.Lucene;
@@ -48,7 +47,7 @@ public class PerFieldMappingPostingFormatCodec extends Lucene80Codec {
}
public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) {
- super(compressionMode, FSTLoadMode.AUTO);
+ super(compressionMode);
this.mapperService = mapperService;
this.logger = logger;
}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 82c4035cfa7db..db9de3765b1e7 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -28,8 +28,6 @@
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
@@ -133,7 +131,6 @@
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.VersionUtils;
import org.hamcrest.MatcherAssert;
-import org.hamcrest.Matchers;
import java.io.Closeable;
import java.io.IOException;
@@ -302,14 +299,6 @@ public void testSegments() throws Exception {
assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
assertThat(segments.get(0).isCompound(), equalTo(true));
assertThat(segments.get(0).ramTree, nullValue());
- assertThat(segments.get(0).getAttributes().keySet(),
- Matchers.contains(
- // TODO: Lucene50PostingsFormat#MODE_KEY should be public ?
- Lucene50PostingsFormat.class.getSimpleName() + ".fstMode",
- Lucene50StoredFieldsFormat.MODE_KEY
- )
- );
-
engine.flush();
segments = engine.segments(false);
diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
index 54d9a015b4e4a..5f730ad138f96 100644
--- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
+++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
@@ -207,13 +207,13 @@ public void testExplainWithRewriteValidateQuery() throws Exception {
// common terms queries
assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1),
- containsString("+field:pidgin (field:huge field:brown)"), true);
+ containsString("+field:pidgin field:huge field:brown"), true);
assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"),
containsString("field:brown"), true);
// match queries with cutoff frequency
assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1),
- containsString("+field:pidgin (field:huge field:brown)"), true);
+ containsString("+field:pidgin field:huge field:brown"), true);
assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"),
containsString("field:brown"), true);
diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
deleted file mode 100644
index fea3658cf61bd..0000000000000
--- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3e85f77d8f8ed1db53dba387fbdec55a9f912639
\ No newline at end of file
diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1
new file mode 100644
index 0000000000000..4a6aa7b098686
--- /dev/null
+++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1
@@ -0,0 +1 @@
+46d614acdeb42f4661e91347100217bc72aae11e
\ No newline at end of file
From 72ab7b6f26b8f76f9534db3ca8ecd6633b271eb9 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Thu, 23 May 2019 11:52:39 +0200
Subject: [PATCH 059/224] Rename SearchRequest#crossClusterSearch (#42363)
The SearchRequest#crossClusterSearch method is currently used only as
part of a cross-cluster search request, when minimizing roundtrips.
It will soon also be used when splitting a search into two: one for
throttled and one for non-throttled indices. It will probably serve
other use cases in the future as well, hence it makes sense to
generalize its name to subSearchRequest.
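To make the forking pattern concrete, here is a minimal, self-contained
sketch (plain Java, not Elasticsearch code; every name in it is
illustrative): one logical search is forked into per-cluster sub-requests
that share the coordinating node's start time, run independently without
a final reduction, and are merged once at completion.

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    public class SubSearchSketch {
        // Stand-ins for the fields a sub-search request copies or overrides.
        record SubRequest(String clusterAlias, long absoluteStartMillis, boolean finalReduce) {}
        record PartialResult(String clusterAlias, long hits) {}

        public static void main(String[] args) {
            long startMillis = System.currentTimeMillis(); // fixed once by the coordinator
            List<String> clusters = List.of("local", "remote1", "remote2");
            // Fork: each sub-request shares the coordinator's start time and
            // defers the final reduction (finalReduce=false) to the merge step.
            List<CompletableFuture<PartialResult>> partials = clusters.stream()
                .map(alias -> new SubRequest(alias, startMillis, false))
                .map(req -> CompletableFuture.supplyAsync(() -> search(req)))
                .toList();
            // Join: a single final reduction over all partial results.
            long total = partials.stream()
                .map(CompletableFuture::join)
                .mapToLong(PartialResult::hits)
                .sum();
            System.out.println("merged hits: " + total);
        }

        // Stand-in for executing one sub-search on one cluster.
        static PartialResult search(SubRequest req) {
            return new PartialResult(req.clusterAlias(), 42);
        }
    }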
---
.../action/search/SearchRequest.java | 15 ++++++++-------
.../action/search/TransportSearchAction.java | 6 +++---
.../action/search/SearchPhaseControllerTests.java | 2 +-
.../action/search/SearchRequestTests.java | 14 +++++++-------
.../TransportSearchActionSingleNodeTests.java | 14 +++++++-------
5 files changed, 26 insertions(+), 25 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
index 6b641906d2e32..53dafc153fc4b 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
@@ -134,9 +134,10 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) {
}
/**
- * Creates a new search request by providing the search request to copy all fields from, the indices to search against, the alias of
- * the cluster where it will be executed, as well as the start time in milliseconds from the epoch time and whether the reduction
- * should be final or not. Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request
+ * Creates a new sub-search request starting from the original search request that is provided.
+ * For internal use only, allows forking a search request into multiple search requests that will be executed independently.
+ * Such requests will not be finally reduced, so that their results can be merged together in one response at completion.
+ * Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request
* performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters.
*
* @param originalSearchRequest the original search request
@@ -145,8 +146,8 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) {
* @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used
* @param finalReduce whether the reduction should be final or not
*/
- static SearchRequest crossClusterSearch(SearchRequest originalSearchRequest, String[] indices,
- String clusterAlias, long absoluteStartMillis, boolean finalReduce) {
+ static SearchRequest subSearchRequest(SearchRequest originalSearchRequest, String[] indices,
+ String clusterAlias, long absoluteStartMillis, boolean finalReduce) {
Objects.requireNonNull(originalSearchRequest, "search request must not be null");
validateIndices(indices);
Objects.requireNonNull(clusterAlias, "cluster alias must not be null");
@@ -284,7 +285,7 @@ boolean isFinalReduce() {
/**
* Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to
* ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search
- * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns
+ * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns
* the provided current time, otherwise it will return {@link System#currentTimeMillis()}.
*/
long getOrCreateAbsoluteStartMillis() {
@@ -292,7 +293,7 @@ long getOrCreateAbsoluteStartMillis() {
}
/**
- * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and
+ * Returns the provided absoluteStartMillis when created through {@link #subSearchRequest} and
* -1 otherwise.
*/
long getAbsoluteStartMillis() {
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index d37e10a71f3b9..a7c0a785c7fce 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -270,7 +270,7 @@ static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIn
String clusterAlias = entry.getKey();
boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
OriginalIndices indices = entry.getValue();
- SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(),
+ SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(),
clusterAlias, timeProvider.getAbsoluteStartMillis(), true);
Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
remoteClusterClient.search(ccsSearchRequest, new ActionListener<SearchResponse>() {
@@ -306,7 +306,7 @@ public void onFailure(Exception e) {
String clusterAlias = entry.getKey();
boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
OriginalIndices indices = entry.getValue();
- SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(),
+ SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(),
clusterAlias, timeProvider.getAbsoluteStartMillis(), false);
ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown,
skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
@@ -316,7 +316,7 @@ public void onFailure(Exception e) {
if (localIndices != null) {
ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
- SearchRequest ccsLocalSearchRequest = SearchRequest.crossClusterSearch(searchRequest, localIndices.indices(),
+ SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest(searchRequest, localIndices.indices(),
RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false);
localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener);
}
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index 084a45267b5c5..3a1adf9748a06 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -330,7 +330,7 @@ private static AtomicArray generateFetchResults(int nShards,
}
private static SearchRequest randomSearchRequest() {
- return randomBoolean() ? new SearchRequest() : SearchRequest.crossClusterSearch(new SearchRequest(),
+ return randomBoolean() ? new SearchRequest() : SearchRequest.subSearchRequest(new SearchRequest(),
Strings.EMPTY_ARRAY, "remote", 0, randomBoolean());
}
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
index 8f1d89a37daaa..06231db26d67e 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
@@ -48,21 +48,21 @@ protected SearchRequest createSearchRequest() throws IOException {
return request;
}
//clusterAlias and absoluteStartMillis do not have public getters/setters hence we randomize them only in this test specifically.
- return SearchRequest.crossClusterSearch(request, request.indices(),
+ return SearchRequest.subSearchRequest(request, request.indices(),
randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong(), randomBoolean());
}
public void testWithLocalReduction() {
- expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean()));
+ expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean()));
SearchRequest request = new SearchRequest();
- expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, null, "", 0, randomBoolean()));
- expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request,
+ expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, null, "", 0, randomBoolean()));
+ expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request,
new String[]{null}, "", 0, randomBoolean()));
- expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request,
+ expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request,
Strings.EMPTY_ARRAY, null, 0, randomBoolean()));
- expectThrows(IllegalArgumentException.class, () -> SearchRequest.crossClusterSearch(request,
+ expectThrows(IllegalArgumentException.class, () -> SearchRequest.subSearchRequest(request,
Strings.EMPTY_ARRAY, "", -1, randomBoolean()));
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean());
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean());
assertNull(searchRequest.validate());
}
diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java
index 82f7c513bf0ce..10f252c30dc3b 100644
--- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java
@@ -46,7 +46,7 @@ public void testLocalClusterAlias() {
assertEquals(RestStatus.CREATED, indexResponse.status());
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY,
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY,
"local", nowInMillis, randomBoolean());
SearchResponse searchResponse = client().search(searchRequest).actionGet();
assertEquals(1, searchResponse.getHits().getTotalHits().value);
@@ -58,7 +58,7 @@ public void testLocalClusterAlias() {
assertEquals("1", hit.getId());
}
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY,
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY,
"", nowInMillis, randomBoolean());
SearchResponse searchResponse = client().search(searchRequest).actionGet();
assertEquals(1, searchResponse.getHits().getTotalHits().value);
@@ -100,13 +100,13 @@ public void testAbsoluteStartMillis() {
assertEquals(0, searchResponse.getTotalShards());
}
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(),
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(),
Strings.EMPTY_ARRAY, "", 0, randomBoolean());
SearchResponse searchResponse = client().search(searchRequest).actionGet();
assertEquals(2, searchResponse.getHits().getTotalHits().value);
}
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(),
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(),
Strings.EMPTY_ARRAY, "", 0, randomBoolean());
searchRequest.indices("");
SearchResponse searchResponse = client().search(searchRequest).actionGet();
@@ -114,7 +114,7 @@ public void testAbsoluteStartMillis() {
assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex());
}
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(),
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(),
Strings.EMPTY_ARRAY, "", 0, randomBoolean());
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date");
@@ -156,7 +156,7 @@ public void testFinalReduce() {
source.aggregation(terms);
{
- SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.crossClusterSearch(originalRequest,
+ SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.subSearchRequest(originalRequest,
Strings.EMPTY_ARRAY, "remote", nowInMillis, true);
SearchResponse searchResponse = client().search(searchRequest).actionGet();
assertEquals(2, searchResponse.getHits().getTotalHits().value);
@@ -165,7 +165,7 @@ public void testFinalReduce() {
assertEquals(1, longTerms.getBuckets().size());
}
{
- SearchRequest searchRequest = SearchRequest.crossClusterSearch(originalRequest,
+ SearchRequest searchRequest = SearchRequest.subSearchRequest(originalRequest,
Strings.EMPTY_ARRAY, "remote", nowInMillis, false);
SearchResponse searchResponse = client().search(searchRequest).actionGet();
assertEquals(2, searchResponse.getHits().getTotalHits().value);
From 5da6f5dfbfcce23ff5ea0b4131887792e24d37bd Mon Sep 17 00:00:00 2001
From: jimczi
Date: Thu, 23 May 2019 12:18:11 +0200
Subject: [PATCH 060/224] upgrade Lucene Version for ES 7.3.0 after backport of
#42214
---
server/src/main/java/org/elasticsearch/Version.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index e3381a3384c0e..7f939ca627a95 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -93,7 +93,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final int V_7_2_0_ID = 7020099;
public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final int V_7_3_0_ID = 7030099;
- public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
+ public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0);
public static final int V_8_0_0_ID = 8000099;
public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0);
public static final Version CURRENT = V_8_0_0;
From cb402220d88127b35152f4567beec41b219b96d3 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Thu, 23 May 2019 12:29:39 +0200
Subject: [PATCH 061/224] Remove deprecated Repository methods (#42359)
We deprecated `restoreShard` and `snapshotShard` in #42213. This change
removes the deprecated methods and their usage and adds a note to the
migration docs.
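For callers the migration is mechanical. A hedged before/after sketch,
mirroring the call-site changes in this patch (the surrounding local
variables are assumed to exist as in the snippets below):

    // before: deprecated overloads that took the IndexShard
    repository.snapshotShard(indexShard, snapshotId, indexId, snapshotIndexCommit, snapshotStatus);
    repository.restoreShard(indexShard, indexShard.store(), snapshotId, version, indexId,
        snapshotShardId, recoveryState);

    // after: pass the Store and MapperService explicitly
    repository.snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId,
        snapshotIndexCommit, snapshotStatus);
    repository.restoreShard(indexShard.store(), snapshotId, version, indexId,
        snapshotShardId, recoveryState);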
---
.../migration/migrate_8_0/java.asciidoc | 8 ++++
.../index/shard/StoreRecovery.java | 2 +-
.../repositories/Repository.java | 41 -------------------
.../snapshots/SnapshotShardsService.java | 3 +-
4 files changed, 11 insertions(+), 43 deletions(-)
diff --git a/docs/reference/migration/migrate_8_0/java.asciidoc b/docs/reference/migration/migrate_8_0/java.asciidoc
index 523e5b463d8bc..21d281acff97f 100644
--- a/docs/reference/migration/migrate_8_0/java.asciidoc
+++ b/docs/reference/migration/migrate_8_0/java.asciidoc
@@ -25,3 +25,11 @@ while silently truncating them to one of the three allowed edit distances 0, 1
or 2. This leniency is now removed and the class will throw errors when trying
to construct an instance with another value (e.g. floats like 1.3 used to get
accepted but truncated to 1). You should use one of the allowed values.
+
+
+[float]
+==== Changes to Repository
+
+Repository no longer has a dependency on IndexShard. The contract of restoreShard
+and snapshotShard has been reduced to Store and MapperService in order to improve
+testability.
diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index aa49f7ecb60ce..fae3703027f9e 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -469,7 +469,7 @@ private void restore(final IndexShard indexShard, final Repository repository, f
}
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
assert indexShard.getEngineOrNull() == null;
- repository.restoreShard(indexShard, indexShard.store(), restoreSource.snapshot().getSnapshotId(),
+ repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(),
restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
final Store store = indexShard.store();
store.bootstrapNewHistory();
diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java
index 3aa19cb130cae..0eca92039fbf8 100644
--- a/server/src/main/java/org/elasticsearch/repositories/Repository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java
@@ -27,7 +27,6 @@
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
@@ -189,27 +188,6 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long
*/
boolean isReadOnly();
- /**
- * Creates a snapshot of the shard based on the index commit point.
- *
- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method.
- * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller.
- *
- * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check
- * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted.
- * @param indexShard the shard to be snapshotted
- * @param snapshotId snapshot id
- * @param indexId id for the index being snapshotted
- * @param snapshotIndexCommit commit point
- * @param snapshotStatus snapshot status
- * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead
- */
- @Deprecated
- default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
- IndexShardSnapshotStatus snapshotStatus) {
- snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus);
- }
-
/**
* Creates a snapshot of the shard based on the index commit point.
*
@@ -228,25 +206,6 @@ default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId
void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
IndexShardSnapshotStatus snapshotStatus);
- /**
- * Restores snapshot of the shard.
- *
- * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied.
- * @param shard the shard to restore the index into
- * @param store the store to restore the index into
- * @param snapshotId snapshot id
- * @param version version of elasticsearch that created this snapshot
- * @param indexId id of the index in the repository from which the restore is occurring
- * @param snapshotShardId shard id (in the snapshot)
- * @param recoveryState recovery state
- * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead
- */
- @Deprecated
- default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId,
- ShardId snapshotShardId, RecoveryState recoveryState) {
- restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState);
- }
-
/**
* Restores snapshot of the shard.
*
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index f79b6da6ef626..b21df093fadd2 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -367,7 +367,8 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina
try {
// we flush first to make sure we get the latest writes snapshotted
try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) {
- repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus);
+ repository.snapshotShard(indexShard.store(), indexShard.mapperService(), snapshot.getSnapshotId(), indexId,
+ snapshotRef.getIndexCommit(), snapshotStatus);
if (logger.isDebugEnabled()) {
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy();
logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus);
From 2721326d576ea8e2feaf278480580d2d83f29628 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Thu, 23 May 2019 05:15:40 -0700
Subject: [PATCH 062/224] Remove old assertion in resync replication request
(#42390)
This assertion was left behind from a previous cleanup. It guarded some
stale logic that was only needed while the master could still talk to
6.x nodes. When that logic was removed, the assertion stayed behind.
This commit removes the stale assertion.
---
.../elasticsearch/action/resync/ResyncReplicationRequest.java | 2 --
1 file changed, 2 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java
index f19bfe3ac6952..78b87435a4f34 100644
--- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java
@@ -38,8 +38,6 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<ResyncReplicationRequest>
Date: Thu, 23 May 2019 07:54:00 -0500
Subject: [PATCH 063/224] Bulk processor concurrent requests (#41451)
`org.elasticsearch.action.bulk.BulkProcessor` is a threadsafe class that
allows for simple semantics to deal with sending bulk requests. Once a
bulk reaches its pre-defined size, document count, or flush interval it will
execute sending the bulk. One configurable option is the number of concurrent
outstanding bulk requests. That concurrency is implemented in
`org.elasticsearch.action.bulk.BulkRequestHandler` via a semaphore. However,
the only code that currently calls into this code is blocked by `synchronized`
methods. This results in the inability of the BulkProcessor to behave
concurrently despite supporting a configurable number of concurrent requests.
This change removes the `synchronized` methods in favor of an explicit
lock around the non-thread-safe parts of the method. The call into
`org.elasticsearch.action.bulk.BulkRequestHandler` is no longer blocking, which
allows `org.elasticsearch.action.bulk.BulkRequestHandler` to handle its own
concurrency.
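The shape of the fix, reduced to a standalone sketch (plain Java, not the
actual BulkProcessor code; names are illustrative): mutate the shared batch
under an explicit lock, swap in a fresh buffer, and hand the completed batch
to the sender outside the lock, so that a semaphore can bound in-flight
requests independently of callers adding documents.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Semaphore;
    import java.util.concurrent.locks.ReentrantLock;

    public class LockThenHandOff {
        private final ReentrantLock lock = new ReentrantLock();
        private final Semaphore concurrentSends = new Semaphore(4); // max in-flight batches
        private final int maxBatchSize = 100;
        private List<String> batch = new ArrayList<>();

        public void add(String doc) throws InterruptedException {
            List<String> toSend = null;
            lock.lock();
            try {
                batch.add(doc);
                if (batch.size() >= maxBatchSize) {
                    toSend = batch;            // swap the buffer under the lock...
                    batch = new ArrayList<>();
                }
            } finally {
                lock.unlock();
            }
            if (toSend != null) {
                send(toSend);                  // ...but do the slow work outside it
            }
        }

        private void send(List<String> docs) throws InterruptedException {
            concurrentSends.acquire();         // concurrency is bounded here, not by the lock
            try {
                // a network call would go here
            } finally {
                concurrentSends.release();
            }
        }
    }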
---
.../action/bulk/BulkProcessor.java | 108 +++++---
.../action/bulk/BulkProcessorTests.java | 251 +++++++++++++++++-
2 files changed, 328 insertions(+), 31 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
index b0ad87a8b744a..08c42c5ea40de 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
@@ -26,6 +26,7 @@
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -39,6 +40,7 @@
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
@@ -225,6 +227,7 @@ private static Scheduler buildScheduler(ScheduledThreadPoolExecutor scheduledThr
private final Runnable onClose;
private volatile boolean closed = false;
+ private final ReentrantLock lock = new ReentrantLock();
BulkProcessor(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy, Listener listener,
int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval,
@@ -264,21 +267,26 @@ public void close() {
* completed
* @throws InterruptedException If the current thread is interrupted
*/
- public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
- if (closed) {
- return true;
- }
- closed = true;
+ public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
+ lock.lock();
+ try {
+ if (closed) {
+ return true;
+ }
+ closed = true;
- this.cancellableFlushTask.cancel();
+ this.cancellableFlushTask.cancel();
- if (bulkRequest.numberOfActions() > 0) {
- execute();
- }
- try {
- return this.bulkRequestHandler.awaitClose(timeout, unit);
+ if (bulkRequest.numberOfActions() > 0) {
+ execute();
+ }
+ try {
+ return this.bulkRequestHandler.awaitClose(timeout, unit);
+ } finally {
+ onClose.run();
+ }
} finally {
- onClose.run();
+ lock.unlock();
}
}
@@ -315,10 +323,22 @@ protected void ensureOpen() {
}
}
- private synchronized void internalAdd(DocWriteRequest<?> request) {
- ensureOpen();
- bulkRequest.add(request);
- executeIfNeeded();
+ private void internalAdd(DocWriteRequest<?> request) {
+ //bulkRequest and instance swapping is not threadsafe, so execute the mutations under a lock.
+ //once the bulk request is ready to be shipped swap the instance reference unlock and send the local reference to the handler.
+ Tuple<BulkRequest, Long> bulkRequestToExecute = null;
+ lock.lock();
+ try {
+ ensureOpen();
+ bulkRequest.add(request);
+ bulkRequestToExecute = newBulkRequestIfNeeded();
+ } finally {
+ lock.unlock();
+ }
+ //execute sending the local reference outside the lock to allow handler to control the concurrency via it's configuration.
+ if (bulkRequestToExecute != null) {
+ execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2());
+ }
}
/**
@@ -332,11 +352,23 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu
/**
* Adds the data from the bytes to be processed by the bulk processor
*/
- public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
+ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
@Nullable String defaultPipeline,
XContentType xContentType) throws Exception {
- bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, true, xContentType);
- executeIfNeeded();
+ Tuple<BulkRequest, Long> bulkRequestToExecute = null;
+ lock.lock();
+ try {
+ ensureOpen();
+ bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline,
+ true, xContentType);
+ bulkRequestToExecute = newBulkRequestIfNeeded();
+ } finally {
+ lock.unlock();
+ }
+
+ if (bulkRequestToExecute != null) {
+ execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2());
+ }
return this;
}
@@ -358,23 +390,32 @@ public boolean isCancelled() {
return scheduler.scheduleWithFixedDelay(flushRunnable, flushInterval, ThreadPool.Names.GENERIC);
}
- private void executeIfNeeded() {
+ // needs to be executed under a lock
+ private Tuple<BulkRequest, Long> newBulkRequestIfNeeded() {
ensureOpen();
if (!isOverTheLimit()) {
- return;
+ return null;
}
- execute();
+ final BulkRequest bulkRequest = this.bulkRequest;
+ this.bulkRequest = bulkRequestSupplier.get();
+ return new Tuple<>(bulkRequest,executionIdGen.incrementAndGet()) ;
+ }
+
+ // may be executed without a lock
+ private void execute(BulkRequest bulkRequest, long executionId ){
+ this.bulkRequestHandler.execute(bulkRequest, executionId);
}
- // (currently) needs to be executed under a lock
+ // needs to be executed under a lock
private void execute() {
final BulkRequest bulkRequest = this.bulkRequest;
final long executionId = executionIdGen.incrementAndGet();
this.bulkRequest = bulkRequestSupplier.get();
- this.bulkRequestHandler.execute(bulkRequest, executionId);
+ execute(bulkRequest, executionId);
}
+ // needs to be executed under a lock
private boolean isOverTheLimit() {
if (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) {
return true;
@@ -388,18 +429,23 @@ private boolean isOverTheLimit() {
/**
* Flush pending delete or index requests.
*/
- public synchronized void flush() {
- ensureOpen();
- if (bulkRequest.numberOfActions() > 0) {
- execute();
+ public void flush() {
+ lock.lock();
+ try {
+ ensureOpen();
+ if (bulkRequest.numberOfActions() > 0) {
+ execute();
+ }
+ } finally {
+ lock.unlock();
}
}
class Flush implements Runnable {
-
@Override
public void run() {
- synchronized (BulkProcessor.this) {
+ lock.lock();
+ try {
if (closed) {
return;
}
@@ -407,6 +453,8 @@ public void run() {
return;
}
execute();
+ } finally {
+ lock.unlock();
}
}
}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
index e2527397a780a..6a58696534ed4 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
@@ -19,26 +19,43 @@
package org.elasticsearch.action.bulk;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
public class BulkProcessorTests extends ESTestCase {
private ThreadPool threadPool;
+ private final Logger logger = LogManager.getLogger(BulkProcessorTests.class);
@Before
public void startThreadPool() {
@@ -90,10 +107,216 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException
bulkProcessor.close();
}
+ public void testConcurrentExecutions() throws Exception {
+ final AtomicBoolean called = new AtomicBoolean(false);
+ final AtomicReference<Throwable> exceptionRef = new AtomicReference<>();
+ int estimatedTimeForTest = Integer.MAX_VALUE;
+ final int simulateWorkTimeInMillis = 5;
+ int concurrentClients = 0;
+ int concurrentBulkRequests = 0;
+ int expectedExecutions = 0;
+ int maxBatchSize = 0;
+ int maxDocuments = 0;
+ int iterations = 0;
+ boolean runTest = true;
+ //find some randoms that allow this test to take under ~ 10 seconds
+ while (estimatedTimeForTest > 10_000) {
+ if (iterations++ > 1_000) { //extremely unlikely
+ runTest = false;
+ break;
+ }
+ maxBatchSize = randomIntBetween(1, 100);
+ maxDocuments = randomIntBetween(maxBatchSize, 1_000_000);
+ concurrentClients = randomIntBetween(1, 20);
+ concurrentBulkRequests = randomIntBetween(0, 20);
+ expectedExecutions = maxDocuments / maxBatchSize;
+ estimatedTimeForTest = (expectedExecutions * simulateWorkTimeInMillis) /
+ Math.min(concurrentBulkRequests + 1, concurrentClients);
+ }
+ assumeTrue("failed to find random values that allows test to run quickly", runTest);
+ BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0);
+ AtomicInteger failureCount = new AtomicInteger(0);
+ AtomicInteger successCount = new AtomicInteger(0);
+ AtomicInteger requestCount = new AtomicInteger(0);
+ AtomicInteger docCount = new AtomicInteger(0);
+ BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer = (request, listener) ->
+ {
+ try {
+ Thread.sleep(simulateWorkTimeInMillis); //simulate work
+ listener.onResponse(bulkResponse);
+ } catch (InterruptedException e) {
+ //should never happen
+ Thread.currentThread().interrupt();
+ failureCount.getAndIncrement();
+ exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e));
+ }
+ };
+ try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(),
+ countingListener(requestCount, successCount, failureCount, docCount, exceptionRef),
+ concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE), null,
+ (command, delay, executor) -> null, () -> called.set(true), BulkRequest::new)) {
+
+ ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients);
+ CountDownLatch startGate = new CountDownLatch(1 + concurrentClients);
+
+ IndexRequest indexRequest = new IndexRequest();
+ String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n";
+ BytesReference bytesReference =
+ BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) });
+ List<Future<?>> futures = new ArrayList<>();
+ for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) {
+ futures.add(executorService.submit(() -> {
+ try {
+ //don't start any work until all tasks are submitted
+ startGate.countDown();
+ startGate.await();
+ //alternate between ways to add to the bulk processor
+ if (randomBoolean()) {
+ bulkProcessor.add(indexRequest);
+ } else {
+ bulkProcessor.add(bytesReference, null, null, XContentType.JSON);
+ }
+ } catch (Exception e) {
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ }));
+ }
+ startGate.countDown();
+ startGate.await();
+
+ for (Future<?> f : futures) {
+ try {
+ f.get();
+ } catch (Exception e) {
+ failureCount.incrementAndGet();
+ exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e));
+ }
+ }
+ executorService.shutdown();
+ executorService.awaitTermination(10, TimeUnit.SECONDS);
+
+ if (failureCount.get() > 0 || successCount.get() != expectedExecutions || requestCount.get() != successCount.get()) {
+ if (exceptionRef.get() != null) {
+ logger.error("exception(s) caught during test", exceptionRef.get());
+ }
+ fail("\nExpected Bulks: " + expectedExecutions + "\n" +
+ "Requested Bulks: " + requestCount.get() + "\n" +
+ "Successful Bulks: " + successCount.get() + "\n" +
+ "Failed Bulks: " + failureCount.get() + "\n" +
+ "Max Documents: " + maxDocuments + "\n" +
+ "Max Batch Size: " + maxBatchSize + "\n" +
+ "Concurrent Clients: " + concurrentClients + "\n" +
+ "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n"
+ );
+ }
+ }
+ //count total docs after processor is closed since there may have been partial batches that are flushed on close.
+ assertEquals(maxDocuments, docCount.get());
+ }
+
+ public void testConcurrentExecutionsWithFlush() throws Exception {
+ final AtomicReference<Throwable> exceptionRef = new AtomicReference<>();
+ final int maxDocuments = 100_000;
+ final int concurrentClients = 2;
+ final int maxBatchSize = Integer.MAX_VALUE; //don't flush based on size
+ final int concurrentBulkRequests = randomIntBetween(0, 20);
+ final int simulateWorkTimeInMillis = 5;
+ BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0);
+ AtomicInteger failureCount = new AtomicInteger(0);
+ AtomicInteger successCount = new AtomicInteger(0);
+ AtomicInteger requestCount = new AtomicInteger(0);
+ AtomicInteger docCount = new AtomicInteger(0);
+ BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer = (request, listener) ->
+ {
+ try {
+ Thread.sleep(simulateWorkTimeInMillis); //simulate work
+ listener.onResponse(bulkResponse);
+ } catch (InterruptedException e) {
+ //should never happen
+ Thread.currentThread().interrupt();
+ failureCount.getAndIncrement();
+ exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e));
+ }
+ };
+ ScheduledExecutorService flushExecutor = Executors.newScheduledThreadPool(1);
+ try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(),
+ countingListener(requestCount, successCount, failureCount, docCount, exceptionRef),
+ concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE),
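+ //with size-based flushing disabled above, this interval is the only trigger for bulk requests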
+ TimeValue.timeValueMillis(simulateWorkTimeInMillis * 2),
+ (command, delay, executor) ->
+ Scheduler.wrapAsScheduledCancellable(flushExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)),
+ () ->
+ {
+ flushExecutor.shutdown();
+ try {
+ flushExecutor.awaitTermination(10L, TimeUnit.SECONDS);
+ if (flushExecutor.isTerminated() == false) {
+ flushExecutor.shutdownNow();
+ }
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ }
+ },
+ BulkRequest::new)) {
+
+ ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients);
+ IndexRequest indexRequest = new IndexRequest();
+ String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n";
+ BytesReference bytesReference =
+ BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) });
+ List<Future<?>> futures = new ArrayList<>();
+ CountDownLatch startGate = new CountDownLatch(1 + concurrentClients);
+ for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) {
+ futures.add(executorService.submit(() -> {
+ try {
+ //don't start any work until all tasks are submitted
+ startGate.countDown();
+ startGate.await();
+ //alternate between ways to add to the bulk processor
+ if (randomBoolean()) {
+ bulkProcessor.add(indexRequest);
+ } else {
+ bulkProcessor.add(bytesReference, null, null, XContentType.JSON);
+ }
+ } catch (Exception e) {
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ }));
+ }
+ startGate.countDown();
+ startGate.await();
+
+ for (Future<?> f : futures) {
+ try {
+ f.get();
+ } catch (Exception e) {
+ failureCount.incrementAndGet();
+ exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e));
+ }
+ }
+ executorService.shutdown();
+ executorService.awaitTermination(10, TimeUnit.SECONDS);
+ }
+
+ if (failureCount.get() > 0 || requestCount.get() != successCount.get() || maxDocuments != docCount.get()) {
+ if (exceptionRef.get() != null) {
+ logger.error("exception(s) caught during test", exceptionRef.get());
+ }
+ fail("\nRequested Bulks: " + requestCount.get() + "\n" +
+ "Successful Bulks: " + successCount.get() + "\n" +
+ "Failed Bulks: " + failureCount.get() + "\n" +
+ "Total Documents: " + docCount.get() + "\n" +
+ "Max Documents: " + maxDocuments + "\n" +
+ "Max Batch Size: " + maxBatchSize + "\n" +
+ "Concurrent Clients: " + concurrentClients + "\n" +
+ "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n"
+ );
+ }
+ }
public void testAwaitOnCloseCallsOnClose() throws Exception {
final AtomicBoolean called = new AtomicBoolean(false);
- BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer = (request, listener) -> {};
+ BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer = (request, listener) -> { };
BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(),
0, 10, new ByteSizeValue(1000), null,
(command, delay, executor) -> null, () -> called.set(true), BulkRequest::new);
@@ -118,4 +341,30 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)
}
};
}
+
+ private BulkProcessor.Listener countingListener(AtomicInteger requestCount, AtomicInteger successCount, AtomicInteger failureCount,
+ AtomicInteger docCount, AtomicReference<Throwable> exceptionRef) {
+
+ return new BulkProcessor.Listener() {
+ @Override
+ public void beforeBulk(long executionId, BulkRequest request) {
+ requestCount.incrementAndGet();
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
+ successCount.incrementAndGet();
+ docCount.addAndGet(request.requests().size());
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
+ if (failure != null) {
+ failureCount.incrementAndGet();
+ exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), failure));
+ }
+ }
+ };
+ }
}
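Both concurrency tests above hold their clients behind the same start gate: a CountDownLatch sized at one plus the number of client threads, counted down once by each submitted task and once by the submitting thread, so no task does any work until everything has been submitted. A minimal, self-contained sketch of just that idiom (the class name StartGateDemo and the pool size are illustrative, not from the patch):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class StartGateDemo {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 4;
        final CountDownLatch startGate = new CountDownLatch(1 + workers);
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            pool.submit(() -> {
                try {
                    startGate.countDown(); // this worker is ready
                    startGate.await();     // released only once all parties have arrived
                    System.out.println(Thread.currentThread().getName() + " started");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        startGate.countDown(); // the submitting thread is the final party
        startGate.await();
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}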
From cbb3bbdd78002a827011003c7ed14446eb3f4148 Mon Sep 17 00:00:00 2001
From: Christoph Büscher
Date: Thu, 23 May 2019 09:53:16 -0400
Subject: [PATCH 064/224] Prevent normalizer from not being closed on exception
(#42375)
Currently AnalysisRegistry#processNormalizerFactory creates a normalizer and
only afterwards checks whether its name is already present in the normalizer
map passed in. If that check throws an exception, the freshly created
normalizer is never closed. This can be prevented by moving the check that
throws the exception before the normalizer is created.
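The bug is an instance of a general pattern: allocating a Closeable and only then running a validation that may throw leaves the resource unclosed. A minimal sketch of the corrected shape, with hypothetical names (Registry, buildAnalyzer) standing in for the real classes:

import java.io.Closeable;
import java.util.Map;

final class Registry {
    // The check that can throw now runs before the analyzer is created, so a
    // duplicate name can no longer leak an unclosed resource.
    static void register(Map<String, Closeable> normalizers, String name) {
        if (normalizers.containsKey(name)) {
            throw new IllegalStateException("already registered analyzer with name: " + name);
        }
        Closeable analyzer = buildAnalyzer(name); // allocated only on the happy path
        normalizers.put(name, analyzer);
    }

    private static Closeable buildAnalyzer(String name) {
        return () -> { }; // stand-in for normalizerFactory.get()
    }
}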
---
.../org/elasticsearch/index/analysis/AnalysisRegistry.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index c4be6edd49069..d9c4b2c510bc9 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
@@ -523,14 +523,14 @@ private void processNormalizerFactory(
if (normalizerFactory instanceof CustomNormalizerProvider) {
((CustomNormalizerProvider) normalizerFactory).build(tokenizerName, tokenizerFactory, charFilters, tokenFilters);
}
+ if (normalizers.containsKey(name)) {
+ throw new IllegalStateException("already registered analyzer with name: " + name);
+ }
Analyzer normalizerF = normalizerFactory.get();
if (normalizerF == null) {
throw new IllegalArgumentException("normalizer [" + normalizerFactory.name() + "] created null normalizer");
}
NamedAnalyzer normalizer = new NamedAnalyzer(name, normalizerFactory.scope(), normalizerF);
- if (normalizers.containsKey(name)) {
- throw new IllegalStateException("already registered analyzer with name: " + name);
- }
normalizers.put(name, normalizer);
}
}
From c459ea828f6419fba0469cc1569c5ead741e7dee Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Thu, 23 May 2019 16:02:12 +0200
Subject: [PATCH 065/224] Remove node.max_local_storage_nodes (#42428)
This setting, which prior to Elasticsearch 5 was enabled by default and caused all kinds of
confusion, has since been disabled by default and is not recommended for production use. The
preferred way going forward is for users to explicitly specify separate data folders for each started
node to ensure that each node is consistently assigned to the same data path.
Relates to #42426
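With the ordinal loop gone, lock acquisition becomes a single fail-fast attempt per data path. A rough sketch of that behaviour using Lucene's default native lock factory (the helper name and message text are illustrative; the real NodeEnvironment keeps more state):

import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

final class NodeLockSketch {
    // Every node now locks the fixed nodes/0 folder in each data path, so a
    // second node started on the same path fails immediately instead of
    // silently claiming the next ordinal.
    static Lock lockDataPath(Path dataPath) throws Exception {
        Path nodeDir = dataPath.resolve("nodes").resolve("0");
        Files.createDirectories(nodeDir);
        FSDirectory dir = FSDirectory.open(nodeDir); // left open while the lock is held
        try {
            return dir.obtainLock("node.lock");
        } catch (LockObtainFailedException e) {
            throw new IllegalStateException("failed to obtain node lock on [" + dataPath
                + "]; maybe another node was started on the same data path?", e);
        }
    }
}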
---
docs/reference/commands/node-tool.asciidoc | 6 +-
docs/reference/migration/migrate_8_0.asciidoc | 2 +
.../migration/migrate_8_0/node.asciidoc | 16 +++
docs/reference/modules/node.asciidoc | 15 ---
.../env/NodeEnvironmentEvilTests.java | 4 +-
.../ElasticsearchNodeCommand.java | 14 +--
.../common/settings/ClusterSettings.java | 1 -
.../elasticsearch/env/NodeEnvironment.java | 97 +++++--------------
.../RemoveCorruptedShardDataCommand.java | 86 +++++++---------
.../elasticsearch/index/shard/ShardPath.java | 7 +-
.../UnsafeBootstrapAndDetachCommandIT.java | 12 +--
.../env/NodeEnvironmentTests.java | 37 +------
.../RemoveCorruptedShardDataCommandTests.java | 2 +-
13 files changed, 99 insertions(+), 200 deletions(-)
create mode 100644 docs/reference/migration/migrate_8_0/node.asciidoc
diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc
index ed810a4dac014..4dd2b0dfe0b6a 100644
--- a/docs/reference/commands/node-tool.asciidoc
+++ b/docs/reference/commands/node-tool.asciidoc
@@ -13,7 +13,7 @@ with the data on disk.
[source,shell]
--------------------------------------------------
bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version
- [--ordinal <Integer>] [-E <KeyValuePair>]
+ [-E <KeyValuePair>]
[-h, --help] ([-s, --silent] | [-v, --verbose])
--------------------------------------------------
@@ -290,10 +290,6 @@ it can join a different cluster.
`override-version`:: Overwrites the version number stored in the data path so
that a node can start despite being incompatible with the on-disk data.
-`--ordinal <Integer>`:: If there is <> then this specifies which node to target. Defaults
-to `0`, meaning to use the first node in the data path.
-
`-E <KeyValuePair>`:: Configures a setting.
`-h, --help`:: Returns all of the command parameters.
diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc
index ed40dddaae28e..84672da61635c 100644
--- a/docs/reference/migration/migrate_8_0.asciidoc
+++ b/docs/reference/migration/migrate_8_0.asciidoc
@@ -20,6 +20,7 @@ coming[8.0.0]
* <>
* <>
* <>
+* <>
* <>
* <>
* <>
@@ -54,6 +55,7 @@ include::migrate_8_0/security.asciidoc[]
include::migrate_8_0/ilm.asciidoc[]
include::migrate_8_0/java.asciidoc[]
include::migrate_8_0/network.asciidoc[]
+include::migrate_8_0/node.asciidoc[]
include::migrate_8_0/transport.asciidoc[]
include::migrate_8_0/http.asciidoc[]
include::migrate_8_0/reindex.asciidoc[]
diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc
new file mode 100644
index 0000000000000..a1dcd654807e1
--- /dev/null
+++ b/docs/reference/migration/migrate_8_0/node.asciidoc
@@ -0,0 +1,16 @@
+[float]
+[[breaking_80_node_changes]]
+=== Node changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
+[float]
+==== Removal of `node.max_local_storage_nodes` setting
+
+The `node.max_local_storage_nodes` setting was deprecated in 7.x and
+has been removed in 8.0. Nodes should be run on separate data paths
+to ensure that each node is consistently assigned to the same data path.
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index f988e97ef553c..031138dada3f1 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -277,21 +277,6 @@ home directory, so that the home directory can be deleted without deleting
your data! The RPM and Debian distributions do this for you already.
-[float]
-[[max-local-storage-nodes]]
-=== `node.max_local_storage_nodes`
-
-The <> can be shared by multiple nodes, even by nodes from different
-clusters. This is very useful for testing failover and different configurations on your development
-machine. In production, however, it is recommended to run only one node of Elasticsearch per server.
-
-By default, Elasticsearch is configured to prevent more than one node from sharing the same data
-path. To allow for more than one node (e.g., on your development machine), use the setting
-`node.max_local_storage_nodes` and set this to a positive integer larger than one.
-
-WARNING: Never run different node types (i.e. master, data) from the same data directory. This can
-lead to unexpected data loss.
-
[float]
== Other node settings
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java
index 57d4a363cc8c7..44d3c2a88a55b 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java
@@ -51,10 +51,10 @@ public void testMissingWritePermission() throws IOException {
Settings build = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
- IOException ioException = expectThrows(IOException.class, () -> {
+ IOException exception = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
});
- assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString()));
+ assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString()));
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java
index ec664c97067d1..a65934c767769 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java
@@ -20,7 +20,6 @@
import joptsimple.OptionParser;
import joptsimple.OptionSet;
-import joptsimple.OptionSpec;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.LockObtainFailedException;
@@ -59,22 +58,15 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?";
static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk";
protected static final String ABORTED_BY_USER_MSG = "aborted by user";
- final OptionSpec<Integer> nodeOrdinalOption;
public ElasticsearchNodeCommand(String description) {
super(description);
- nodeOrdinalOption = parser.accepts("ordinal", "Optional node ordinal, 0 if not specified")
- .withRequiredArg().ofType(Integer.class);
namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables());
}
- protected void processNodePathsWithLock(Terminal terminal, OptionSet options, Environment env) throws IOException {
+ protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException {
terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node");
- Integer nodeOrdinal = nodeOrdinalOption.value(options);
- if (nodeOrdinal == null) {
- nodeOrdinal = 0;
- }
- try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) {
+ try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, env, Files::exists)) {
final Path[] dataPaths =
Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
if (dataPaths.length == 0) {
@@ -118,7 +110,7 @@ protected void confirm(Terminal terminal, String msg) {
protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
terminal.println(STOP_WARNING_MSG);
if (validateBeforeLock(terminal, env)) {
- processNodePathsWithLock(terminal, options, env);
+ processNodePaths(terminal, options, env);
}
}
diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 6b50c0f1c112c..e29ceb7372bcf 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -390,7 +390,6 @@ public void apply(Settings value, Settings current, Settings previous) {
ThreadContext.DEFAULT_HEADERS_SETTING,
Loggers.LOG_DEFAULT_LEVEL_SETTING,
Loggers.LOG_LEVEL_SETTING,
- NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
OsService.REFRESH_INTERVAL_SETTING,
ProcessService.REFRESH_INTERVAL_SETTING,
diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 4cfd22ecb1a65..497c6a9e06459 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -81,7 +81,6 @@
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -91,9 +90,9 @@
*/
public final class NodeEnvironment implements Closeable {
public static class NodePath {
- /* ${data.paths}/nodes/{node.id} */
+ /* ${data.paths}/nodes/0 */
public final Path path;
- /* ${data.paths}/nodes/{node.id}/indices */
+ /* ${data.paths}/nodes/0/indices */
public final Path indicesPath;
/** Cached FileStore from path */
public final FileStore fileStore;
@@ -152,18 +151,11 @@ public String toString() {
private final Path sharedDataPath;
private final Lock[] locks;
- private final int nodeLockId;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();
private final NodeMetaData nodeMetaData;
- /**
- * Maximum number of data nodes that should run in an environment.
- */
- public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1,
- Property.NodeScope);
-
/**
* Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk,
* this seed will be ignored and the uuid from disk will be reused.
@@ -184,7 +176,6 @@ public String toString() {
public static class NodeLock implements Releasable {
- private final int nodeId;
private final Lock[] locks;
private final NodePath[] nodePaths;
@@ -192,17 +183,16 @@ public static class NodeLock implements Releasable {
* Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it
* @param pathFunction function to check node path before attempt of acquiring a node lock
*/
- public NodeLock(final int nodeId, final Logger logger,
+ public NodeLock(final Logger logger,
final Environment environment,
final CheckedFunction<Path, Boolean, IOException> pathFunction) throws IOException {
- this.nodeId = nodeId;
nodePaths = new NodePath[environment.dataFiles().length];
locks = new Lock[nodePaths.length];
try {
final Path[] dataPaths = environment.dataFiles();
for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) {
Path dataDir = dataPaths[dirIndex];
- Path dir = resolveNodePath(dataDir, nodeId);
+ Path dir = resolveNodePath(dataDir);
if (pathFunction.apply(dir) == false) {
continue;
}
@@ -248,61 +238,35 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
nodePaths = null;
sharedDataPath = null;
locks = null;
- nodeLockId = -1;
nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT);
return;
}
boolean success = false;
- NodeLock nodeLock = null;
try {
sharedDataPath = environment.sharedDataFile();
- IOException lastException = null;
- int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
- final AtomicReference<IOException> onCreateDirectoriesException = new AtomicReference<>();
- for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
- try {
- nodeLock = new NodeLock(possibleLockId, logger, environment,
- dir -> {
- try {
- Files.createDirectories(dir);
- } catch (IOException e) {
- onCreateDirectoriesException.set(e);
- throw e;
- }
- return true;
- });
- break;
- } catch (LockObtainFailedException e) {
- // ignore any LockObtainFailedException
- } catch (IOException e) {
- if (onCreateDirectoriesException.get() != null) {
- throw onCreateDirectoriesException.get();
- }
- lastException = e;
- }
+ for (Path path : environment.dataFiles()) {
+ Files.createDirectories(resolveNodePath(path));
}
- if (nodeLock == null) {
+ final NodeLock nodeLock;
+ try {
+ nodeLock = new NodeLock(logger, environment, dir -> true);
+ } catch (IOException e) {
final String message = String.format(
Locale.ROOT,
- "failed to obtain node locks, tried [%s] with lock id%s;" +
- " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
- Arrays.toString(environment.dataFiles()),
- maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
- MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
- maxLocalStorageNodes);
- throw new IllegalStateException(message, lastException);
+ "failed to obtain node locks, tried %s;" +
+ " maybe these locations are not writable or multiple nodes were started on the same data path?",
+ Arrays.toString(environment.dataFiles()));
+ throw new IllegalStateException(message, e);
}
+
this.locks = nodeLock.locks;
this.nodePaths = nodeLock.nodePaths;
- this.nodeLockId = nodeLock.nodeId;
this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
- if (logger.isDebugEnabled()) {
- logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId);
- }
+ logger.debug("using node location {}", Arrays.toString(nodePaths));
maybeLogPathDetails();
maybeLogHeapDetails();
@@ -334,11 +298,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
* Resolve a specific nodes/{node.id} path for the specified path and node lock id.
*
* @param path the path
- * @param nodeLockId the node lock id
* @return the resolved path
*/
- public static Path resolveNodePath(final Path path, final int nodeLockId) {
- return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId));
+ public static Path resolveNodePath(final Path path) {
+ return path.resolve(NODES_FOLDER).resolve("0");
}
private void maybeLogPathDetails() throws IOException {
@@ -805,14 +768,6 @@ public NodePath[] nodePaths() {
return nodePaths;
}
- public int getNodeLockId() {
- assertEnvIsLocked();
- if (nodePaths == null || locks == null) {
- throw new IllegalStateException("node is not configured to store local location");
- }
- return nodeLockId;
- }
-
/**
* Returns all index paths.
*/
@@ -1137,12 +1092,12 @@ private static boolean isIndexMetaDataPath(Path path) {
*
* @param indexSettings settings for the index
*/
- public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) {
+ public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath) {
String customDataDir = indexSettings.customDataPath();
if (customDataDir != null) {
// This assert is because this should be caught by MetaDataCreateIndexService
assert sharedDataPath != null;
- return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(nodeLockId));
+ return sharedDataPath.resolve(customDataDir).resolve("0");
} else {
throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available");
}
@@ -1156,11 +1111,11 @@ public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path s
* @param indexSettings settings for the index
*/
private Path resolveIndexCustomLocation(IndexSettings indexSettings) {
- return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId);
+ return resolveIndexCustomLocation(indexSettings, sharedDataPath);
}
- private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) {
- return resolveBaseCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(indexSettings.getUUID());
+ private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath) {
+ return resolveBaseCustomLocation(indexSettings, sharedDataPath).resolve(indexSettings.getUUID());
}
/**
@@ -1172,11 +1127,11 @@ private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path
* @param shardId shard to resolve the path to
*/
public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) {
- return resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId);
+ return resolveCustomLocation(indexSettings, shardId, sharedDataPath);
}
- public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath, int nodeLockId) {
- return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(Integer.toString(shardId.id()));
+ public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath) {
+ return resolveIndexCustomLocation(indexSettings, sharedDataPath).resolve(Integer.toString(shardId.id()));
}
/**
diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java
index 7242198633be2..16db596515b4c 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java
@@ -126,8 +126,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen
final String indexName;
final int shardId;
- final int fromNodeId;
- final int toNodeId;
if (options.has(folderOption)) {
final Path path = getPath(folderOption.value(options)).getParent();
@@ -150,8 +148,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen
) {
shardId = Integer.parseInt(shardIdFileName);
indexName = indexMetaData.getIndex().getName();
- fromNodeId = Integer.parseInt(nodeIdFileName);
- toNodeId = fromNodeId + 1;
} else {
throw new ElasticsearchException("Unable to resolve shard id. Wrong folder structure at [ " + path.toString()
+ " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]");
@@ -160,59 +156,49 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen
// otherwise resolve shardPath based on the index name and shard id
indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required");
shardId = Objects.requireNonNull(shardIdOption.value(options), "Shard ID is required");
-
- // resolve shard path in case of multi-node layout per environment
- fromNodeId = 0;
- toNodeId = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
}
- // have to iterate over possibleLockId as NodeEnvironment; on a contrast to it - we have to fail if node is busy
- for (int possibleLockId = fromNodeId; possibleLockId < toNodeId; possibleLockId++) {
- try {
- try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(possibleLockId, logger, environment, Files::exists)) {
- final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths();
- for (NodeEnvironment.NodePath nodePath : nodePaths) {
- if (Files.exists(nodePath.indicesPath)) {
- // have to scan all index uuid folders to resolve from index name
- try (DirectoryStream<Path> stream = Files.newDirectoryStream(nodePath.indicesPath)) {
- for (Path file : stream) {
- if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) {
- continue;
- }
-
- final IndexMetaData indexMetaData =
- IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file);
- if (indexMetaData == null) {
- continue;
- }
- final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
- final Index index = indexMetaData.getIndex();
- if (indexName.equals(index.getName()) == false) {
- continue;
- }
- final ShardId shId = new ShardId(index, shardId);
-
- final Path shardPathLocation = nodePath.resolve(shId);
- if (Files.exists(shardPathLocation) == false) {
- continue;
- }
- final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings,
- new Path[]{shardPathLocation}, possibleLockId, nodePath.path);
- if (shardPath != null) {
- consumer.accept(shardPath);
- return;
- }
- }
+ try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) {
+ final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths();
+ for (NodeEnvironment.NodePath nodePath : nodePaths) {
+ if (Files.exists(nodePath.indicesPath)) {
+ // have to scan all index uuid folders to resolve from index name
+ try (DirectoryStream