Skip to content

Commit

Permalink
Allow listing older repositories (#78244)
Browse files Browse the repository at this point in the history
Allows listing repositories with snapshots down to ES 5.0. This does not mean that these snapshots can be
restored, just that you can inspect the metadata of older repositories and list their snapshots.

Note that this already worked (except for /_status), but was untested.
  • Loading branch information
ywelsch committed Oct 26, 2021
1 parent 5ffc4b5 commit 8599e6e
Show file tree
Hide file tree
Showing 6 changed files with 402 additions and 18 deletions.
124 changes: 124 additions & 0 deletions qa/repository-old-versions/build.gradle
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/


import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.Architecture
import org.elasticsearch.gradle.OS
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.internal.info.BuildParams
import org.elasticsearch.gradle.internal.test.AntFixture
import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask

apply plugin: 'elasticsearch.jdk-download'
apply plugin: 'elasticsearch.internal-testclusters'
apply plugin: 'elasticsearch.standalone-rest-test'

// Holds the fixture used to launch old (pre-current) Elasticsearch distributions.
configurations {
  oldesFixture
}

dependencies {
  oldesFixture project(':test:fixtures:old-elasticsearch')
  testImplementation project(':client:rest-high-level')
}

// Old ES versions require a Java 8 runtime, downloaded separately from the build JDK.
jdks {
  legacy {
    vendor = 'adoptium'
    version = '8u302+b08'
    platform = OS.current().name().toLowerCase()
    architecture = Architecture.current().name().toLowerCase()
  }
}

if (Os.isFamily(Os.FAMILY_WINDOWS)) {
  logger.warn("Disabling repository-old-versions tests because we can't get the pid file on windows")
} else {
  /* Set up tasks to unzip and run the old versions of ES before running the integration tests.
   * To avoid testing against too many old versions, always pick first and last version per major
   */
  for (String versionString : ['5.0.0', '5.6.16', '6.0.0', '6.8.20']) {
    Version version = Version.fromString(versionString)
    String packageName = 'org.elasticsearch.distribution.zip'
    String artifact = "${packageName}:elasticsearch:${version}@zip"
    String versionNoDots = version.toString().replace('.', '_')
    String configName = "es${versionNoDots}"

    // One configuration per old version, resolving that version's distribution zip.
    configurations.create(configName)

    dependencies.add(configName, artifact)

    // TODO Rene: we should be able to replace these unzip tasks with gradle artifact transforms
    TaskProvider<Sync> unzip = tasks.register("unzipEs${versionNoDots}", Sync) {
      Configuration oldEsDependency = configurations[configName]
      dependsOn oldEsDependency
      /* Use a closure here to delay resolution of the dependency until we need
       * it */
      from {
        oldEsDependency.collect { zipTree(it) }
      }
      into temporaryDir
    }

    // Shared filesystem repository path: written by the old node, read by the new cluster.
    String repoLocation = "${buildDir}/cluster/shared/repo/${versionNoDots}"

    String clusterName = versionNoDots

    // Current-version test cluster with the shared repo path registered.
    def testClusterProvider = testClusters.register(clusterName) {
      setting 'path.repo', repoLocation
      setting 'xpack.security.enabled', 'false'
    }

    // Fixture that runs the old-version Elasticsearch node out of the unzipped distribution.
    TaskProvider<AntFixture> fixture = tasks.register("oldES${versionNoDots}Fixture", AntFixture) {
      dependsOn project.configurations.oldesFixture, jdks.legacy
      dependsOn unzip
      executable = "${BuildParams.runtimeJavaHome}/bin/java"
      env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}"
      // old versions of Elasticsearch need JAVA_HOME
      env 'JAVA_HOME', jdks.legacy.javaHomePath
      // If we are running on certain arm systems we need to explicitly set the stack size to overcome JDK page size bug
      if (Architecture.current() == Architecture.AARCH64) {
        env 'ES_JAVA_OPTS', '-Xss512k'
      }
      args 'oldes.OldElasticsearch',
        baseDir,
        unzip.get().temporaryDir,
        false,
        "path.repo: ${repoLocation}"
      maxWaitInSeconds 60
      waitCondition = { fixture, ant ->
        // the fixture writes the ports file when Elasticsearch's HTTP service
        // is ready, so we can just wait for the file to exist
        return fixture.portsFile.exists()
      }
    }

    tasks.register("javaRestTest#${versionNoDots}", StandaloneRestIntegTestTask) {
      useCluster testClusterProvider
      dependsOn fixture
      // Start every run from an empty repository directory.
      doFirst {
        delete(repoLocation)
        mkdir(repoLocation)
      }
      systemProperty "tests.repo.location", repoLocation
      systemProperty "tests.es.version", version.toString()
      /* Use a closure on the string to delay evaluation until right before we
       * run the integration tests so that we can be sure that the file is
       * ready. */
      nonInputProperties.systemProperty "tests.es.port", "${-> fixture.get().addressAndPort}"
      nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusterProvider.get().allHttpSocketURI.join(",")}")
      nonInputProperties.systemProperty('tests.clustername', "${-> testClusterProvider.get().getName()}")
    }

    tasks.named("check").configure {
      dependsOn "javaRestTest#${versionNoDots}"
    }
  }
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/

package org.elasticsearch.oldrepos;

import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.rest.ESRestTestCase;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;

public class OldRepositoryAccessIT extends ESRestTestCase {

    @Override
    protected Map<String, List<Map<?, ?>>> wipeSnapshots() {
        // Do not wipe snapshots between tests: they belong to the old-version
        // repository under test and must not be removed by standard test cleanup.
        return Collections.emptyMap();
    }

    /**
     * Creates an index and takes a snapshot on an old-version ES node (driven over
     * plain HTTP via the low-level REST client), then registers the same filesystem
     * repository on the current-version cluster and verifies that the snapshot can
     * be listed (get-all, get-by-name) and its status inspected — without claiming
     * it can be restored.
     */
    @SuppressWarnings("removal")
    public void testOldRepoAccess() throws IOException {
        String repoLocation = System.getProperty("tests.repo.location");
        Version oldVersion = Version.fromString(System.getProperty("tests.es.version"));

        int oldEsPort = Integer.parseInt(System.getProperty("tests.es.port"));
        try (
            RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0])));
            RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()
        ) {
            try {
                // create a test index with a random shard count on the old node
                Request createIndex = new Request("PUT", "/test");
                int numberOfShards = randomIntBetween(1, 3);
                createIndex.setJsonEntity("{\"settings\":{\"number_of_shards\": " + numberOfShards + "}}");
                oldEs.performRequest(createIndex);

                for (int i = 0; i < 5; i++) {
                    Request doc = new Request("PUT", "/test/doc/testdoc" + i);
                    doc.addParameter("refresh", "true");
                    doc.setJsonEntity("{\"test\":\"test" + i + "\", \"val\":" + i + "}");
                    oldEs.performRequest(doc);
                }

                // register repo on old ES and take snapshot
                Request createRepoRequest = new Request("PUT", "/_snapshot/testrepo");
                createRepoRequest.setJsonEntity("{\"type\":\"fs\",\"settings\":{\"location\":\"" + repoLocation + "\"}}");
                oldEs.performRequest(createRepoRequest);

                Request createSnapshotRequest = new Request("PUT", "/_snapshot/testrepo/snap1");
                createSnapshotRequest.addParameter("wait_for_completion", "true");
                createSnapshotRequest.setJsonEntity("{\"indices\":\"test\"}");
                oldEs.performRequest(createSnapshotRequest);

                // register repo on new ES, pointing at the same filesystem location
                ElasticsearchAssertions.assertAcked(
                    client.snapshot()
                        .createRepository(
                            new PutRepositoryRequest("testrepo").type("fs")
                                .settings(Settings.builder().put("location", repoLocation).build()),
                            RequestOptions.DEFAULT
                        )
                );

                // list snapshots on new ES
                List<SnapshotInfo> snapshotInfos = client.snapshot()
                    .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "_all" }), RequestOptions.DEFAULT)
                    .getSnapshots();
                assertThat(snapshotInfos, hasSize(1));
                assertSnapshotInfo(snapshotInfos.get(0), numberOfShards, oldVersion);

                // list specific snapshot on new ES
                snapshotInfos = client.snapshot()
                    .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT)
                    .getSnapshots();
                assertThat(snapshotInfos, hasSize(1));
                assertSnapshotInfo(snapshotInfos.get(0), numberOfShards, oldVersion);

                // list advanced snapshot info (_status) on new ES
                SnapshotsStatusResponse snapshotsStatusResponse = client.snapshot()
                    .status(new SnapshotsStatusRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT);
                assertThat(snapshotsStatusResponse.getSnapshots(), hasSize(1));
                SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0);
                assertEquals("snap1", snapshotStatus.getSnapshot().getSnapshotId().getName());
                assertEquals("testrepo", snapshotStatus.getSnapshot().getRepository());
                assertEquals(Sets.newHashSet("test"), snapshotStatus.getIndices().keySet());
                assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.getState());
                assertEquals(numberOfShards, snapshotStatus.getShardsStats().getDoneShards());
                assertEquals(numberOfShards, snapshotStatus.getShardsStats().getTotalShards());
                assertEquals(0, snapshotStatus.getShardsStats().getFailedShards());
                assertThat(snapshotStatus.getStats().getTotalSize(), greaterThan(0L));
                assertThat(snapshotStatus.getStats().getTotalFileCount(), greaterThan(0));
            } finally {
                // clean up the index on the old node even when assertions fail
                oldEs.performRequest(new Request("DELETE", "/test"));
            }
        }
    }

    /**
     * Asserts the metadata of the single snapshot taken on the old-version node, as
     * returned by the current-version cluster's get-snapshots API. Extracted because
     * the get-all and get-by-name paths must report identical metadata.
     */
    private static void assertSnapshotInfo(SnapshotInfo snapshotInfo, int numberOfShards, Version oldVersion) {
        assertEquals("snap1", snapshotInfo.snapshotId().getName());
        assertEquals("testrepo", snapshotInfo.repository());
        assertEquals(Arrays.asList("test"), snapshotInfo.indices());
        assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
        assertEquals(numberOfShards, snapshotInfo.successfulShards());
        assertEquals(numberOfShards, snapshotInfo.totalShards());
        assertEquals(0, snapshotInfo.failedShards());
        assertEquals(oldVersion, snapshotInfo.version());
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,7 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParserUtils;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.gateway.MetadataStateFormat;
Expand All @@ -52,6 +47,11 @@
import org.elasticsearch.index.shard.IndexLongFieldRange;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentParser;

import java.io.IOException;
import java.time.Instant;
Expand Down Expand Up @@ -1638,6 +1638,75 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti
}
return builder.build();
}

/**
 * Used to load legacy metadata from ES versions that are no longer index-compatible.
 * Returns information on a best-effort basis: settings and a few top-level scalar
 * fields are read; mappings and custom index metadata are skipped.
 * Throws an exception if the metadata is index-compatible with the current version (in that case,
 * {@link #fromXContent} should be used to load the content).
 */
public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOException {
    if (parser.currentToken() == null) { // fresh parser? move to the first token
        parser.nextToken();
    }
    if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
        parser.nextToken();
    }
    // the first field name is the index name, which seeds the builder
    XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
    Builder builder = new Builder(parser.currentName());

    String currentFieldName = null;
    XContentParser.Token token = parser.nextToken();
    XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
            if ("settings".equals(currentFieldName)) {
                Settings settings = Settings.fromXContent(parser);
                // reject metadata recent enough for the regular parser: callers must use fromXContent then
                if (SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(Version.CURRENT.minimumIndexCompatibilityVersion())) {
                    throw new IllegalStateException("this method should only be used to parse older index metadata versions " +
                        "but got " + SETTING_INDEX_VERSION_CREATED.get(settings));
                }
                builder.settings(settings);
            } else if ("mappings".equals(currentFieldName)) {
                // don't try to parse these for now
                parser.skipChildren();
            } else {
                // assume it's custom index metadata
                parser.skipChildren();
            }
        } else if (token == XContentParser.Token.START_ARRAY) {
            if ("mappings".equals(currentFieldName)) {
                // don't try to parse these for now
                parser.skipChildren();
            } else {
                // skip any other array-valued field as well
                parser.skipChildren();
            }
        } else if (token.isValue()) {
            if ("state".equals(currentFieldName)) {
                builder.state(State.fromString(parser.text()));
            } else if ("version".equals(currentFieldName)) {
                builder.version(parser.longValue());
            } else if ("mapping_version".equals(currentFieldName)) {
                builder.mappingVersion(parser.longValue());
            } else if ("settings_version".equals(currentFieldName)) {
                builder.settingsVersion(parser.longValue());
            } else if ("routing_num_shards".equals(currentFieldName)) {
                builder.setRoutingNumShards(parser.intValue());
            } else {
                // unknown, ignore
            }
        } else {
            XContentParserUtils.throwUnknownToken(token, parser.getTokenLocation());
        }
    }
    // consume the END_OBJECT that closes the outer index-name wrapper
    XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);

    IndexMetadata indexMetadata = builder.build();
    assert indexMetadata.getCreationVersion().before(Version.CURRENT.minimumIndexCompatibilityVersion());
    return indexMetadata;
}
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// NOTE(review): the extra parser argument appears to be a fallback used for metadata
// written by versions that are no longer index-compatible (legacyFromXContent throws
// for compatible versions) — confirm argument semantics against ChecksumBlobStoreFormat.
public static final ChecksumBlobStoreFormat<IndexMetadata> INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>(
    "index-metadata",
    METADATA_NAME_FORMAT,
    (repoName, parser) -> IndexMetadata.Builder.legacyFromXContent(parser),
    (repoName, parser) -> IndexMetadata.fromXContent(parser)
);

Expand Down

0 comments on commit 8599e6e

Please sign in to comment.