Skip to content

Commit

Permalink
Don't mark backing indices of overlapping data streams as conflicts. (#…
Browse files Browse the repository at this point in the history
…69668)

Backport of #69625 to the 7.11 branch.

Today, when upgrading from a 7.9.x or 7.10.x version to 7.11.x or later, if
two (or more) data streams exist that have an overlapping prefix and one data
stream name ends with a date suffix that matches the backing index date pattern
(uuuu.MM.dd), then newly upgraded nodes may refuse to join, essentially
preventing the cluster upgrade from proceeding.

In this case the validation logic in `Metadata#validateDataStreams(...)` confuses
backing indices of one data stream as regular indices and thinks these indices
collide with another data stream.

The validation only fails incorrectly if both a {data-stream-name} and a
{data-stream-name}-{uuuu.MM.dd} data stream exist, the latter has been rolled
over more times than the former, and the cluster is then upgraded to 7.11+.

A 7.10.2 cluster with:

Data stream 1: logs-foobar
Backing indices: logs-foobar-000001

Data stream 2: logs-foobar-2021.01.13
Backing indices: logs-foobar-2021.01.13-000001, logs-foobar-2021.01.13-000002

When upgrading, the new node will not join, because it thinks that the
'logs-foobar-2021.01.13-000002' index collides with the backing index
namespace of the 'logs-foobar' data stream.

This change tries to address this.
  • Loading branch information
martijnvg committed Mar 1, 2021
1 parent 8851e61 commit 28743ad
Show file tree
Hide file tree
Showing 3 changed files with 57 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1558,6 +1558,23 @@ static void validateDataStreams(SortedMap<String, IndexAbstraction> indicesLooku
.keySet().stream()
.filter(s -> BACKING_INDEX_SUFFIX.matcher(s.substring(prefix.length())).matches())
.filter(s -> IndexMetadata.parseIndexNameCounter(s) > ds.getGeneration())
.filter(indexName -> {
// Logic to avoid marking backing indices of other data streams as conflict:

// Backing index pattern is either .ds-[ds-name]-[date]-[generation] for 7.11 and up or
// .ds-[ds-name]-[generation] for 7.9 to 7.10.2. So two step process to capture the data stream name:
String dataStreamName =
indexName.substring(DataStream.BACKING_INDEX_PREFIX.length(), indexName.lastIndexOf('-'));
if (dsMetadata.dataStreams().containsKey(dataStreamName)) {
return false;
}
dataStreamName = indexName.substring(0, indexName.lastIndexOf('-'));
if (dsMetadata.dataStreams().containsKey(dataStreamName)) {
return false;
} else {
return true;
}
})
.collect(Collectors.toSet());

if (conflicts.size() > 0) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
Expand Down Expand Up @@ -1049,6 +1050,38 @@ public void testBuilderForDataStreamWithRandomlyNumberedBackingIndices() {
assertThat(metadata.dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName));
}

/**
 * Regression test for overlapping data stream names: a data stream whose name
 * ends in a date suffix matching the backing index date pattern
 * ("logs-foobar-2021.01.13") overlaps the name space of the shorter
 * "logs-foobar" data stream. Building the metadata must succeed — the dated
 * stream's backing indices must not be reported as conflicts for the shorter
 * stream by {@code Metadata#validateDataStreams(...)}.
 */
public void testOverlappingDataStreamNamesWithBackingIndexDatePattern() {
    final String datedStreamName = "logs-foobar-2021.01.13";
    final String plainStreamName = "logs-foobar";
    final Metadata.Builder builder = Metadata.builder();

    // Dated data stream with two generations of pre-7.11 style backing indices.
    final IndexMetadata datedBackingIndex1 =
        IndexMetadata.builder(DataStream.getDefaultBackingIndexName(datedStreamName, 1, Version.V_7_10_2))
            .settings(settings(Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(1)
            .build();
    final IndexMetadata datedBackingIndex2 =
        IndexMetadata.builder(DataStream.getDefaultBackingIndexName(datedStreamName, 2, Version.V_7_10_2))
            .settings(settings(Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(1)
            .build();
    builder.put(datedBackingIndex1, false);
    builder.put(datedBackingIndex2, false);
    builder.put(new DataStream(datedStreamName, createTimestampField("@timestamp"),
        Arrays.asList(datedBackingIndex1.getIndex(), datedBackingIndex2.getIndex()), 2, null));

    // Plain data stream whose name is a strict prefix of the dated one.
    final IndexMetadata plainBackingIndex =
        IndexMetadata.builder(DataStream.getDefaultBackingIndexName(plainStreamName, 1, Version.V_7_10_2))
            .settings(settings(Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(1)
            .build();
    builder.put(plainBackingIndex, false);
    builder.put(new DataStream(plainStreamName, createTimestampField("@timestamp"),
        Collections.singletonList(plainBackingIndex.getIndex()), 1, null));

    // build() invokes the data stream validation; no exception means no false conflict.
    final Metadata metadata = builder.build();
    assertThat(metadata.dataStreams().size(), equalTo(2));
}

public void testBuildIndicesLookupForDataStreams() {
Metadata.Builder b = Metadata.builder();
int numDataStreams = randomIntBetween(2, 8);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,19 @@ public void testDataStreams() throws IOException {
b.append("{\"create\":{\"_index\":\"").append("logs-foobar").append("\"}}\n");
b.append("{\"@timestamp\":\"2020-12-12\",\"test\":\"value").append(i).append("\"}\n");
}

b.append("{\"create\":{\"_index\":\"").append("logs-foobar-2021.01.13").append("\"}}\n");
b.append("{\"@timestamp\":\"2020-12-12\",\"test\":\"value").append(0).append("\"}\n");

Request bulk = new Request("POST", "/_bulk");
bulk.addParameter("refresh", "true");
bulk.addParameter("filter_path", "errors");
bulk.setJsonEntity(b.toString());
Response response = client().performRequest(bulk);
assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8));

Request rolloverRequest = new Request("POST", "/logs-foobar-2021.01.13/_rollover");
client().performRequest(rolloverRequest);
} else if (CLUSTER_TYPE == ClusterType.MIXED) {
long nowMillis = System.currentTimeMillis();
Request rolloverRequest = new Request("POST", "/logs-foobar/_rollover");
Expand Down

0 comments on commit 28743ad

Please sign in to comment.