diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 9469ac8b50983..01bd5e8a81888 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -366,7 +366,6 @@ public void apply(Settings value, Settings current, Settings previous) {
Node.NODE_INGEST_SETTING,
Node.NODE_ATTRIBUTES,
URLRepository.ALLOWED_URLS_SETTING,
- URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index c1efcb7801cf4..52372706d34cb 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -24,6 +24,7 @@
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Numbers;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
@@ -34,7 +35,6 @@
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.io.Streams;
@@ -83,7 +83,8 @@ *
* {@code
* STORE_ROOT
- * |- index - list of all snapshot name as JSON array
+ * |- index-N - list of all snapshot names as a JSON array, N is the generation of the file
+ * |- index-latest - contains the numeric value of the latest generation of the index file (i.e. N from above)
* |- snapshot-20131010 - JSON serialized Snapshot for snapshot "20131010"
* |- meta-20131010.dat - JSON serialized MetaData for snapshot "20131010" (includes only global metadata)
* |- snapshot-20131011 - JSON serialized Snapshot for snapshot "20131011"
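To make the generational layout above concrete, here is a small illustrative sketch; the "index-" prefix and the "index-latest" name are taken from the javadoc above, while the helper class itself is hypothetical and not part of this change.

    // Hypothetical helper, for illustration only: blob names used by the generational snapshots index.
    final class RepositoryIndexLayout {
        // index-N holds the JSON array of {name, uuid} snapshot entries for generation N
        static String snapshotsIndexBlob(long generation) {
            return "index-" + Long.toString(generation); // e.g. "index-0", "index-1"
        }

        // index-latest holds a single long: the newest generation N written so far
        static String latestGenerationBlob() {
            return "index-latest";
        }
    }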
@@ -127,12 +128,14 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
List<SnapshotId> snapshotIds = snapshots().stream().filter(id -> snapshotId.equals(id) == false).collect(Collectors.toList());
- writeSnapshotList(snapshotIds);
+ writeSnapshotsToIndexGen(snapshotIds);
+
// Now delete all indices
for (String index : indices) {
BlobPath indexPath = basePath().add("indices").add(index);
@@ -386,8 +389,8 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId,
snapshotIds = new ArrayList<>(snapshotIds);
snapshotIds.add(snapshotId);
snapshotIds = Collections.unmodifiableList(snapshotIds);
+ writeSnapshotsToIndexGen(snapshotIds);
}
- writeSnapshotList(snapshotIds);
return blobStoreSnapshot;
} catch (IOException ex) {
throw new RepositoryException(this.repositoryName, "failed to update snapshot in repository", ex);
@@ -400,40 +403,12 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId,
@Override
public List<SnapshotId> snapshots() {
try {
- List<SnapshotId> snapshots = new ArrayList<>();
- Map<String, BlobMetaData> blobs;
- try {
- blobs = snapshotsBlobContainer.listBlobsByPrefix(COMMON_SNAPSHOT_PREFIX);
- } catch (UnsupportedOperationException ex) {
- // Fall back in case listBlobsByPrefix isn't supported by the blob store
- return readSnapshotList();
- }
- int prefixLength = SNAPSHOT_PREFIX.length();
- int suffixLength = SNAPSHOT_SUFFIX.length();
- int legacyPrefixLength = LEGACY_SNAPSHOT_PREFIX.length();
- for (BlobMetaData md : blobs.values()) {
- String blobName = md.name();
- final String name;
- String uuid;
- if (blobName.startsWith(SNAPSHOT_PREFIX) && blobName.length() > legacyPrefixLength) {
- final String str = blobName.substring(prefixLength, blobName.length() - suffixLength);
- // TODO: this will go away once we make the snapshot file writes atomic and
- // use it as the source of truth for the snapshots list instead of listing blobs
- Tuple<String, String> pair = parseNameUUIDFromBlobName(str);
- name = pair.v1();
- uuid = pair.v2();
- } else if (blobName.startsWith(LEGACY_SNAPSHOT_PREFIX) && blobName.length() > suffixLength + prefixLength) {
- name = blobName.substring(legacyPrefixLength);
- uuid = SnapshotId.UNASSIGNED_UUID;
- } else {
- // not sure what it was - ignore
- continue;
- }
- snapshots.add(new SnapshotId(name, uuid));
- }
- return Collections.unmodifiableList(snapshots);
- } catch (IOException ex) {
- throw new RepositoryException(repositoryName, "failed to list snapshots in repository", ex);
+ return Collections.unmodifiableList(readSnapshotsFromIndex());
+ } catch (NoSuchFileException | FileNotFoundException e) {
+ // it's a fresh repository, no index file exists, so return an empty list
+ return Collections.emptyList();
+ } catch (IOException ioe) {
+ throw new RepositoryException(repositoryName, "failed to list snapshots in repository", ioe);
}
}
@@ -564,89 +539,6 @@ private BlobStoreFormat<IndexMetaData> indexMetaDataFormat(Version version) {
private static final String NAME = "name";
private static final String UUID = "uuid";
- /**
- * Writes snapshot index file
- *
- * This file can be used by read-only repositories that are unable to list files in the repository
- *
- * @param snapshots list of snapshot ids
- * @throws IOException I/O errors
- */
- protected void writeSnapshotList(List<SnapshotId> snapshots) throws IOException {
- final BytesReference bRef;
- try(BytesStreamOutput bStream = new BytesStreamOutput()) {
- try(StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
- XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
- builder.startObject();
- builder.startArray(SNAPSHOTS);
- for (SnapshotId snapshot : snapshots) {
- builder.startObject();
- builder.field(NAME, snapshot.getName());
- builder.field(UUID, snapshot.getUUID());
- builder.endObject();
- }
- builder.endArray();
- builder.endObject();
- builder.close();
- }
- bRef = bStream.bytes();
- }
- if (snapshotsBlobContainer.blobExists(SNAPSHOTS_FILE)) {
- snapshotsBlobContainer.deleteBlob(SNAPSHOTS_FILE);
- }
- snapshotsBlobContainer.writeBlob(SNAPSHOTS_FILE, bRef);
- }
-
- /**
- * Reads snapshot index file
- *
- * This file can be used by read-only repositories that are unable to list files in the repository
- *
- * @return list of snapshots in the repository
- * @throws IOException I/O errors
- */
- protected List<SnapshotId> readSnapshotList() throws IOException {
- try (InputStream blob = snapshotsBlobContainer.readBlob(SNAPSHOTS_FILE)) {
- BytesStreamOutput out = new BytesStreamOutput();
- Streams.copy(blob, out);
- ArrayList<SnapshotId> snapshots = new ArrayList<>();
- try (XContentParser parser = XContentHelper.createParser(out.bytes())) {
- if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
- if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
- String currentFieldName = parser.currentName();
- if (SNAPSHOTS.equals(currentFieldName)) {
- if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
- while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
- // the new format from 5.0 which contains the snapshot name and uuid
- String name = null;
- String uuid = null;
- if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
- while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
- currentFieldName = parser.currentName();
- parser.nextToken();
- if (NAME.equals(currentFieldName)) {
- name = parser.text();
- } else if (UUID.equals(currentFieldName)) {
- uuid = parser.text();
- }
- }
- snapshots.add(new SnapshotId(name, uuid));
- }
- // the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
- else {
- name = parser.text();
- snapshots.add(new SnapshotId(name, SnapshotId.UNASSIGNED_UUID));
- }
- }
- }
- }
- }
- }
- }
- return Collections.unmodifiableList(snapshots);
- }
- }
-
@Override
public void onRestorePause(long nanos) {
restoreRateLimitingTimeInNanos.inc(nanos);
@@ -714,27 +606,99 @@ BlobContainer blobContainer() {
return snapshotsBlobContainer;
}
- // TODO: this will go away once readSnapshotsList uses the index file instead of listing blobs
- // to know which snapshots are part of a repository. See #18156
- // Package private for testing.
- static Tuple<String, String> parseNameUUIDFromBlobName(final String str) {
- final String name;
- final String uuid;
- final int sizeOfUUID = 22; // uuid is 22 chars in length
- // unreliable, but highly unlikely to have a snapshot name with a dash followed by 22 characters,
- // and this will go away before a release (see #18156).
- //norelease
- if (str.length() > sizeOfUUID + 1 && str.charAt(str.length() - sizeOfUUID - 1) == '-') {
- // new naming convention, snapshot blob id has name and uuid
- final int idx = str.length() - sizeOfUUID - 1;
- name = str.substring(0, idx);
- uuid = str.substring(idx + 1);
+ protected void writeSnapshotsToIndexGen(final List<SnapshotId> snapshots) throws IOException {
+ assert readOnly() == false; // can not write to a read only repository
+ final BytesReference snapshotsBytes;
+ try (BytesStreamOutput bStream = new BytesStreamOutput()) {
+ try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ builder.startArray(SNAPSHOTS);
+ for (SnapshotId snapshot : snapshots) {
+ builder.startObject();
+ builder.field(NAME, snapshot.getName());
+ builder.field(UUID, snapshot.getUUID());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ builder.close();
+ }
+ snapshotsBytes = bStream.bytes();
+ }
+ final long gen = latestIndexBlobId() + 1;
+ // write the index file
+ writeAtomic(SNAPSHOTS_FILE_PREFIX + Long.toString(gen), snapshotsBytes);
+ // delete the N-2 index file if it exists, keep the previous one around as a backup
+ if (readOnly() == false && gen - 2 >= 0) {
+ final String oldSnapshotIndexFile = SNAPSHOTS_FILE_PREFIX + Long.toString(gen - 2);
+ if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) {
+ snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile);
+ }
+ }
+
+ // write the current generation to the index-latest file
+ final BytesReference genBytes;
+ try (BytesStreamOutput bStream = new BytesStreamOutput()) {
+ bStream.writeLong(gen);
+ genBytes = bStream.bytes();
+ }
+ if (snapshotsBlobContainer.blobExists(SNAPSHOTS_INDEX_LATEST_BLOB)) {
+ snapshotsBlobContainer.deleteBlob(SNAPSHOTS_INDEX_LATEST_BLOB);
+ }
+ writeAtomic(SNAPSHOTS_INDEX_LATEST_BLOB, genBytes);
+ }
+
+ protected List<SnapshotId> readSnapshotsFromIndex() throws IOException {
+ final long indexGen = latestIndexBlobId();
+ final String snapshotsIndexBlobName;
+ if (indexGen == -1) {
+ // index-N file doesn't exist, either it's a fresh repository, or it's in the
+ // old format, so look for the older index file before returning an empty list
+ snapshotsIndexBlobName = SNAPSHOTS_FILE;
} else {
- // old naming convention, before snapshots had UUIDs
- name = str;
- uuid = SnapshotId.UNASSIGNED_UUID;
+ snapshotsIndexBlobName = SNAPSHOTS_FILE_PREFIX + Long.toString(indexGen);
+ }
+
+ try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) {
+ BytesStreamOutput out = new BytesStreamOutput();
+ Streams.copy(blob, out);
+ ArrayList<SnapshotId> snapshots = new ArrayList<>();
+ try (XContentParser parser = XContentHelper.createParser(out.bytes())) {
+ if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
+ if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ if (SNAPSHOTS.equals(currentFieldName)) {
+ if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ // the new format from 5.0 which contains the snapshot name and uuid
+ String name = null;
+ String uuid = null;
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ currentFieldName = parser.currentName();
+ parser.nextToken();
+ if (NAME.equals(currentFieldName)) {
+ name = parser.text();
+ } else if (UUID.equals(currentFieldName)) {
+ uuid = parser.text();
+ }
+ }
+ snapshots.add(new SnapshotId(name, uuid));
+ }
+ // the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
+ else {
+ name = parser.text();
+ snapshots.add(new SnapshotId(name, SnapshotId.UNASSIGNED_UUID));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return Collections.unmodifiableList(snapshots);
}
- return Tuple.tuple(name, uuid);
}
// Package private for testing
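A hedged round-trip sketch of the generational methods introduced in the hunk above; it assumes an already-initialized, writable BlobStoreRepository (e.g. from a test), and is illustrative only, not part of the change.

    // Illustrative usage, assuming "repository" is a writable BlobStoreRepository instance.
    List<SnapshotId> ids = new ArrayList<>();
    ids.add(new SnapshotId("snap-1", UUIDs.randomBase64UUID()));
    repository.writeSnapshotsToIndexGen(ids);   // writes index-0, then index-latest
    ids.add(new SnapshotId("snap-2", UUIDs.randomBase64UUID()));
    repository.writeSnapshotsToIndexGen(ids);   // writes index-1; index-0 is kept as a backup
    assert repository.snapshots().equals(ids);  // reads back through the latest generation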
@@ -746,4 +710,79 @@ static String blobId(final SnapshotId snapshotId) {
}
return snapshotId.getName() + "-" + uuid;
}
+
+ /**
+ * Get the latest snapshot index blob id. Snapshot index blobs are named index-N, where N is
+ * the generation assigned when the index blob was written. Each individual index-N blob is
+ * only written once and never overwritten. The highest numbered index-N blob is the latest one
+ * that contains the current snapshots in the repository.
+ *
+ * Package private for testing
+ */
+ long latestIndexBlobId() throws IOException {
+ try {
+ // first, try listing the blobs and determining which index blob is the latest
+ return listBlobsToGetLatestIndexId();
+ } catch (UnsupportedOperationException e) {
+ // could not list the blobs because the repository does not support the operation,
+ // try reading from the index-latest file
+ try {
+ return readSnapshotIndexLatestBlob();
+ } catch (IOException ioe) {
+ // we likely could not find the blob, this can happen in two scenarios:
+ // (1) it's an empty repository
+ // (2) when writing the index-latest blob, if the blob already exists,
+ // we first delete it, then atomically write the new blob. there is
+ // a small window in time when the blob is deleted and the new one
+ // written - if the node crashes during that time, we won't have an
+ // index-latest blob
+ // in a read-only repository, we can't know which of the two scenarios it is,
+ // but we will assume (1) because we can't do anything about (2) anyway
+ return -1;
+ }
+ }
+ }
+
+ // package private for testing
+ long readSnapshotIndexLatestBlob() throws IOException {
+ try (InputStream blob = snapshotsBlobContainer.readBlob(SNAPSHOTS_INDEX_LATEST_BLOB)) {
+ BytesStreamOutput out = new BytesStreamOutput();
+ Streams.copy(blob, out);
+ return Numbers.bytesToLong(out.bytes().toBytesRef());
+ }
+ }
+
+ private long listBlobsToGetLatestIndexId() throws IOException {
+ Map<String, BlobMetaData> blobs = snapshotsBlobContainer.listBlobsByPrefix(SNAPSHOTS_FILE_PREFIX);
+ long latest = -1;
+ if (blobs.isEmpty()) {
+ // no snapshot index blobs have been written yet
+ return latest;
+ }
+ for (final BlobMetaData blobMetaData : blobs.values()) {
+ final String blobName = blobMetaData.name();
+ try {
+ final long curr = Long.parseLong(blobName.substring(SNAPSHOTS_FILE_PREFIX.length()));
+ latest = Math.max(latest, curr);
+ } catch (NumberFormatException nfe) {
+ // the index- blob wasn't of the format index-N where N is a number,
+ // no idea what this blob is but it doesn't belong in the repository!
+ logger.debug("[{}] Unknown blob in the repository: {}", repositoryName, blobName);
+ }
+ }
+ return latest;
+ }
+
+ private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException {
+ final String tempBlobName = "pending-" + blobName;
+ snapshotsBlobContainer.writeBlob(tempBlobName, bytesRef);
+ try {
+ snapshotsBlobContainer.move(tempBlobName, blobName);
+ } catch (IOException ex) {
+ // Move failed - try cleaning up
+ snapshotsBlobContainer.deleteBlob(tempBlobName);
+ throw ex;
+ }
+ }
+
}
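The index-latest blob written above stores the generation as the raw bytes of a long (StreamOutput#writeLong on the write side, Numbers.bytesToLong on the read side). A minimal, self-contained sketch of that encoding using plain java.nio; treat it as an approximation of the Elasticsearch stream classes, not the real implementation.

    import java.nio.ByteBuffer;

    // Approximate stand-in for the index-latest encoding: one big-endian long per blob.
    final class IndexLatestCodec {
        // Encode a generation the way index-latest stores it.
        static byte[] encode(long generation) {
            return ByteBuffer.allocate(Long.BYTES).putLong(generation).array();
        }

        // Decode the blob contents back into the generation number.
        static long decode(byte[] blobContents) {
            return ByteBuffer.wrap(blobContents).getLong();
        }
    }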
diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
index b805727fd9c9d..0eb3006c5c3c2 100644
--- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
@@ -19,7 +19,6 @@
package org.elasticsearch.repositories.uri;
-import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.url.URLBlobStore;
@@ -42,7 +41,6 @@
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
-import java.util.function.Predicate;
/**
* Read-only URL-based implementation of the BlobStoreRepository
@@ -69,11 +67,6 @@ public class URLRepository extends BlobStoreRepository {
new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), URLRepository::parseURL,
Property.NodeScope);
- public static final Setting<Boolean> LIST_DIRECTORIES_SETTING =
- Setting.boolSetting("list_directories", true, Property.NodeScope);
- public static final Setting<Boolean> REPOSITORIES_LIST_DIRECTORIES_SETTING =
- Setting.boolSetting("repositories.uri.list_directories", true, Property.NodeScope);
-
private final List<String> supportedProtocols;
private final URIPattern[] urlWhiteList;
@@ -84,8 +77,6 @@ public class URLRepository extends BlobStoreRepository {
private final BlobPath basePath;
- private boolean listDirectories;
-
/**
* Constructs new read-only URL-based repository
*
@@ -103,7 +94,6 @@ public URLRepository(RepositoryName name, RepositorySettings repositorySettings,
supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings);
urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{});
this.environment = environment;
- listDirectories = LIST_DIRECTORIES_SETTING.exists(repositorySettings.settings()) ? LIST_DIRECTORIES_SETTING.get(repositorySettings.settings()) : REPOSITORIES_LIST_DIRECTORIES_SETTING.get(settings);
URL url = URL_SETTING.exists(repositorySettings.settings()) ? URL_SETTING.get(repositorySettings.settings()) : REPOSITORIES_URL_SETTING.get(settings);
URL normalizedURL = checkURL(url);
@@ -124,19 +114,6 @@ protected BlobPath basePath() {
return basePath;
}
- @Override
- public List<SnapshotId> snapshots() {
- if (listDirectories) {
- return super.snapshots();
- } else {
- try {
- return readSnapshotList();
- } catch (IOException ex) {
- throw new RepositoryException(repositoryName, "failed to get snapshot list in repository", ex);
- }
- }
- }
-
/**
* Makes sure that the url is white listed or if it points to the local file system it matches one on of the root path in path.repo
*/
diff --git a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
index 3d46c0bbacf58..17e2481b7d05b 100644
--- a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
+++ b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -24,7 +24,6 @@
import org.elasticsearch.client.Client;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -46,7 +45,6 @@
import java.util.stream.Collectors;
import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.blobId;
-import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.parseNameUUIDFromBlobName;
import static org.hamcrest.Matchers.equalTo;
/**
@@ -108,26 +106,13 @@ public void testRetrieveSnapshots() throws Exception {
assertThat(snapshotIds, equalTo(originalSnapshots));
}
- public void testSnapshotIndexFile() throws Exception {
- final Client client = client();
- final Path location = ESIntegTestCase.randomRepoPath(node().settings());
- final String repositoryName = "test-repo";
-
- PutRepositoryResponse putRepositoryResponse =
- client.admin().cluster().preparePutRepository(repositoryName)
- .setType("fs")
- .setSettings(Settings.builder().put(node().settings()).put("location", location))
- .get();
- assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
-
- final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
- @SuppressWarnings("unchecked") final BlobStoreRepository repository =
- (BlobStoreRepository) repositoriesService.repository(repositoryName);
+ public void testReadAndWriteSnapshotsThroughIndexFile() throws Exception {
+ final BlobStoreRepository repository = setupRepo();
// write to and read from a snapshot file with no entries
- repository.writeSnapshotList(Collections.emptyList());
- List<SnapshotId> readSnapshotIds = repository.readSnapshotList();
- assertThat(readSnapshotIds.size(), equalTo(0));
+ assertThat(repository.snapshots().size(), equalTo(0));
+ repository.writeSnapshotsToIndexGen(Collections.emptyList());
+ assertThat(repository.snapshots().size(), equalTo(0));
// write to and read from a snapshot file with a random number of entries
final int numSnapshots = randomIntBetween(1, 1000);
@@ -135,26 +120,43 @@ public void testSnapshotIndexFile() throws Exception {
for (int i = 0; i < numSnapshots; i++) {
snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
}
- repository.writeSnapshotList(snapshotIds);
- readSnapshotIds = repository.readSnapshotList();
- assertThat(readSnapshotIds, equalTo(snapshotIds));
+ repository.writeSnapshotsToIndexGen(snapshotIds);
+ assertThat(repository.snapshots(), equalTo(snapshotIds));
}
- public void testOldIndexFileFormat() throws Exception {
- final Client client = client();
- final Path location = ESIntegTestCase.randomRepoPath(node().settings());
- final String repositoryName = "test-repo";
+ public void testIndexGenerationalFiles() throws Exception {
+ final BlobStoreRepository repository = setupRepo();
- PutRepositoryResponse putRepositoryResponse =
- client.admin().cluster().preparePutRepository(repositoryName)
- .setType("fs")
- .setSettings(Settings.builder().put(node().settings()).put("location", location))
- .get();
- assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ // write to index generational file
+ final int numSnapshots = randomIntBetween(1, 1000);
+ final List<SnapshotId> snapshotIds = new ArrayList<>(numSnapshots);
+ for (int i = 0; i < numSnapshots; i++) {
+ snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
+ }
+ repository.writeSnapshotsToIndexGen(snapshotIds);
+ assertThat(Sets.newHashSet(repository.readSnapshotsFromIndex()), equalTo(Sets.newHashSet(snapshotIds)));
+ assertThat(repository.latestIndexBlobId(), equalTo(0L));
+ assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(0L));
- final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
- @SuppressWarnings("unchecked") final BlobStoreRepository repository =
- (BlobStoreRepository) repositoriesService.repository(repositoryName);
+ // adding more and writing to a new index generational file
+ for (int i = 0; i < 10; i++) {
+ snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
+ }
+ repository.writeSnapshotsToIndexGen(snapshotIds);
+ assertThat(Sets.newHashSet(repository.readSnapshotsFromIndex()), equalTo(Sets.newHashSet(snapshotIds)));
+ assertThat(repository.latestIndexBlobId(), equalTo(1L));
+ assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(1L));
+
+ // removing a snapshot and writing to a new index generational file
+ snapshotIds.remove(0);
+ repository.writeSnapshotsToIndexGen(snapshotIds);
+ assertThat(Sets.newHashSet(repository.readSnapshotsFromIndex()), equalTo(Sets.newHashSet(snapshotIds)));
+ assertThat(repository.latestIndexBlobId(), equalTo(2L));
+ assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(2L));
+ }
+
+ public void testOldIndexFileFormat() throws Exception {
+ final BlobStoreRepository repository = setupRepo();
// write old index file format
final int numOldSnapshots = randomIntBetween(1, 50);
@@ -163,36 +165,15 @@ public void testOldIndexFileFormat() throws Exception {
snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), SnapshotId.UNASSIGNED_UUID));
}
writeOldFormat(repository, snapshotIds.stream().map(SnapshotId::getName).collect(Collectors.toList()));
- List<SnapshotId> readSnapshotIds = repository.readSnapshotList();
- assertThat(Sets.newHashSet(readSnapshotIds), equalTo(Sets.newHashSet(snapshotIds)));
+ assertThat(Sets.newHashSet(repository.snapshots()), equalTo(Sets.newHashSet(snapshotIds)));
// write to and read from a snapshot file with a random number of new entries added
final int numSnapshots = randomIntBetween(1, 1000);
for (int i = 0; i < numSnapshots; i++) {
snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
}
- repository.writeSnapshotList(snapshotIds);
- readSnapshotIds = repository.readSnapshotList();
- assertThat(Sets.newHashSet(readSnapshotIds), equalTo(Sets.newHashSet(snapshotIds)));
- }
-
- public void testParseUUIDFromBlobName() {
- String blobStr = "abc123";
- Tuple<String, String> pair = parseNameUUIDFromBlobName(blobStr);
- assertThat(pair.v1(), equalTo(blobStr)); // snapshot name
- assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID)); // snapshot uuid
- blobStr = "abcefghijklmnopqrstuvwxyz";
- pair = parseNameUUIDFromBlobName(blobStr);
- assertThat(pair.v1(), equalTo(blobStr));
- assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID));
- blobStr = "abc123-xyz"; // not enough characters after '-' to have a uuid
- pair = parseNameUUIDFromBlobName(blobStr);
- assertThat(pair.v1(), equalTo(blobStr));
- assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID));
- blobStr = "abc123-a1b2c3d4e5f6g7h8i9j0k1";
- pair = parseNameUUIDFromBlobName(blobStr);
- assertThat(pair.v1(), equalTo("abc123"));
- assertThat(pair.v2(), equalTo("a1b2c3d4e5f6g7h8i9j0k1"));
+ repository.writeSnapshotsToIndexGen(snapshotIds);
+ assertThat(Sets.newHashSet(repository.snapshots()), equalTo(Sets.newHashSet(snapshotIds)));
}
public void testBlobId() {
@@ -208,6 +189,24 @@ public void testBlobId() {
assertThat(blobId(snapshotId), equalTo("abc-123-" + uuid)); // snapshot name + '-' + uuid
}
+ private BlobStoreRepository setupRepo() {
+ final Client client = client();
+ final Path location = ESIntegTestCase.randomRepoPath(node().settings());
+ final String repositoryName = "test-repo";
+
+ PutRepositoryResponse putRepositoryResponse =
+ client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("fs")
+ .setSettings(Settings.builder().put(node().settings()).put("location", location))
+ .get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
+ @SuppressWarnings("unchecked") final BlobStoreRepository repository =
+ (BlobStoreRepository) repositoriesService.repository(repositoryName);
+ return repository;
+ }
+
private void writeOldFormat(final BlobStoreRepository repository, final List<String> snapshotNames) throws Exception {
final BytesReference bRef;
try (BytesStreamOutput bStream = new BytesStreamOutput()) {
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 15623825887f5..a25c8e0593e55 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -284,8 +284,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
countDownLatch.await();
}
- private static interface ClusterStateUpdater {
- public ClusterState execute(ClusterState currentState) throws Exception;
+ private interface ClusterStateUpdater {
+ ClusterState execute(ClusterState currentState) throws Exception;
}
public void testSnapshotDuringNodeShutdown() throws Exception {
@@ -392,8 +392,11 @@ public void testSnapshotWithStuckNode() throws Exception {
logger.info("--> making sure that snapshot no longer exists");
assertThrows(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute(), SnapshotMissingException.class);
- // Subtract index file from the count
- assertThat("not all files were deleted during snapshot cancellation", numberOfFilesBeforeSnapshot, equalTo(numberOfFiles(repo) - 1));
+ // Subtract three files that will remain in the repository:
+ // (1) index-1
+ // (2) index-0 (because we keep the previous version) and
+ // (3) index-latest
+ assertThat("not all files were deleted during snapshot cancellation", numberOfFilesBeforeSnapshot, equalTo(numberOfFiles(repo) - 3));
logger.info("--> done");
}
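The file accounting behind the changed assertion above can be restated as a tiny worked example (the starting count is hypothetical): after the cancelled snapshot is cleaned up, exactly three extra blobs remain, as the comment in the hunk lists.

    // Hypothetical illustration of the accounting used in the assertion above.
    long filesBefore = 10;                       // numberOfFilesBeforeSnapshot (made-up value)
    long filesAfterCleanup = filesBefore + 3;    // index-1, index-0 (backup) and index-latest remain
    assert filesBefore == filesAfterCleanup - 3; // mirrors assertThat(..., equalTo(numberOfFiles(repo) - 3))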
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 19b46710fea0a..e1efbdfaf81ae 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -818,8 +818,9 @@ public void testDeleteSnapshot() throws Exception {
logger.info("--> delete the last snapshot");
client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get();
- logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
- assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
+ logger.info("--> make sure that number of files is back to what it was when the first snapshot was made, " +
+ "plus one because one backup index-N file should remain");
+ assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0] + 1));
}
public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exception {
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java
index 2809b8588f19d..510b6d20f0791 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java
@@ -34,6 +34,7 @@
import org.elasticsearch.repositories.RepositoryName;
import org.elasticsearch.repositories.RepositorySettings;
+import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URISyntaxException;
@@ -127,7 +128,7 @@ public void deleteBlob(String container, String blob) throws URISyntaxException,
this.client.deleteBlob(this.accountName, this.locMode, container, blob);
}
- public InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException
+ public InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException, IOException
{
return this.client.getInputStream(this.accountName, this.locMode, container, blob);
}
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java
index 13db36aeb5a29..137ff20efbeac 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java
@@ -29,6 +29,7 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
+import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URISyntaxException;
@@ -75,7 +76,7 @@ final class Storage {
void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException;
InputStream getInputStream(String account, LocationMode mode, String container, String blob)
- throws URISyntaxException, StorageException;
+ throws URISyntaxException, StorageException, IOException;
OutputStream getOutputStream(String account, LocationMode mode, String container, String blob)
throws URISyntaxException, StorageException;
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
index 8160c560325ae..5d4f9c6f69b04 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
@@ -31,6 +31,8 @@
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URISyntaxException;
@@ -79,7 +81,10 @@ public void deleteBlob(String account, LocationMode mode, String container, Stri
}
@Override
- public InputStream getInputStream(String account, LocationMode mode, String container, String blob) {
+ public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws IOException {
+ if (!blobExists(account, mode, container, blob)) {
+ throw new FileNotFoundException("missing blob [" + blob + "]");
+ }
return new ByteArrayInputStream(blobs.get(blob).toByteArray());
}
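The mock change above pairs with the new snapshots() behavior earlier in this diff: a blob store is expected to signal a missing blob with FileNotFoundException (or NoSuchFileException), which the repository maps to "fresh repository, return an empty list". A minimal illustrative pattern along those lines, assuming the imports already present in this file; it is not taken verbatim from the codebase.

    // Illustrative: surface a missing blob as FileNotFoundException so callers can
    // distinguish "blob does not exist" from other I/O failures.
    public InputStream readBlobOrThrow(Map<String, ByteArrayOutputStream> blobs, String name) throws IOException {
        final ByteArrayOutputStream data = blobs.get(name);
        if (data == null) {
            throw new FileNotFoundException("missing blob [" + name + "]");
        }
        return new ByteArrayInputStream(data.toByteArray());
    }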