diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index bb5e1e757812f..77ebcfec5328a 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
+import org.elasticsearch.gradle.test.AntFixture
esplugin {
description 'The Azure Repository plugin adds support for Azure storage repositories.'
@@ -42,9 +43,28 @@ thirdPartyAudit.excludes = [
'org.slf4j.LoggerFactory',
]
-integTestCluster {
- keystoreSetting 'azure.client.default.account', 'cloudazureresource'
- keystoreSetting 'azure.client.default.key', 'abcdefgh'
- keystoreSetting 'azure.client.secondary.account', 'cloudazureresource'
- keystoreSetting 'azure.client.secondary.key', 'abcdefgh'
+forbiddenApisTest {
+ // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+}
+
+/** A task to start the fixture which emulates an Azure Storage service **/
+task azureStorageFixture(type: AntFixture) {
+ dependsOn compileTestJava
+ env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
+ executable = new File(project.runtimeJavaHome, 'bin/java')
+ args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, 'container_test'
}
+
+integTestCluster {
+ dependsOn azureStorageFixture
+
+ keystoreSetting 'azure.client.integration_test.account', "azure_integration_test_account"
+ /* The key is "azure_integration_test_key" encoded using base64 */
+ keystoreSetting 'azure.client.integration_test.key', "YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk="
+ // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used
+ // in a hacky way to change the protocol and endpoint. We must fix that.
+ setting 'azure.client.integration_test.endpoint_suffix',
+ "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }"
+}
\ No newline at end of file
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java
new file mode 100644
index 0000000000000..025ee45b9c3a0
--- /dev/null
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.azure;
+
+import com.microsoft.azure.storage.StorageException;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.repositories.ESBlobStoreTestCase;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+public class AzureBlobStoreTests extends ESBlobStoreTestCase {
+
+ @Override
+ protected BlobStore newBlobStore() throws IOException {
+ try {
+ RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY);
+ AzureStorageServiceMock client = new AzureStorageServiceMock();
+ return new AzureBlobStore(repositoryMetaData, Settings.EMPTY, client);
+ } catch (URISyntaxException | StorageException e) {
+ throw new IOException(e);
+ }
+ }
+}
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java
deleted file mode 100644
index 981e0889e73e5..0000000000000
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.repositories.azure;
-
-import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.MockNode;
-import org.elasticsearch.node.Node;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * Azure Repository
- * Main class to easily run Azure from a IDE.
- * It sets all the options to run the Azure plugin and access it from Sense.
- *
- * In order to run this class set configure the following:
- * 1) Set `-Des.path.home=` to a directory containing an ES config directory
- * 2) Set `-Dcloud.azure.storage.my_account.account=account_name`
- * 3) Set `-Dcloud.azure.storage.my_account.key=account_key`
- *
- * Then you can run REST calls like:
- *
- # Clean test env
- curl -XDELETE localhost:9200/foo?pretty
- curl -XDELETE localhost:9200/_snapshot/my_backup1?pretty
- curl -XDELETE localhost:9200/_snapshot/my_backup2?pretty
-
- # Create data
- curl -XPUT localhost:9200/foo/bar/1?pretty -d '{
- "foo": "bar"
- }'
- curl -XPOST localhost:9200/foo/_refresh?pretty
- curl -XGET localhost:9200/foo/_count?pretty
-
- # Create repository using default account
- curl -XPUT localhost:9200/_snapshot/my_backup1?pretty -d '{
- "type": "azure"
- }'
-
- # Backup
- curl -XPOST "localhost:9200/_snapshot/my_backup1/snap1?pretty&wait_for_completion=true"
-
- # Remove data
- curl -XDELETE localhost:9200/foo?pretty
-
- # Restore data
- curl -XPOST "localhost:9200/_snapshot/my_backup1/snap1/_restore?pretty&wait_for_completion=true"
- curl -XGET localhost:9200/foo/_count?pretty
-
- *
- * If you want to define a secondary repository:
- *
- * 4) Set `-Dcloud.azure.storage.my_account.default=true`
- * 5) Set `-Dcloud.azure.storage.my_account2.account=account_name`
- * 6) Set `-Dcloud.azure.storage.my_account2.key=account_key_secondary`
- *
- * Then you can run REST calls like:
- *
- # Remove data
- curl -XDELETE localhost:9200/foo?pretty
-
- # Create repository using account2 (secondary)
- curl -XPUT localhost:9200/_snapshot/my_backup2?pretty -d '{
- "type": "azure",
- "settings": {
- "account" : "my_account2",
- "location_mode": "secondary_only"
- }
- }'
-
- # Restore data from the secondary endpoint
- curl -XPOST "localhost:9200/_snapshot/my_backup2/snap1/_restore?pretty&wait_for_completion=true"
- curl -XGET localhost:9200/foo/_count?pretty
-
- */
-public class AzureRepositoryF {
- public static void main(String[] args) throws Throwable {
- Settings.Builder settings = Settings.builder();
- settings.put("http.cors.enabled", "true");
- settings.put("http.cors.allow-origin", "*");
- settings.put("cluster.name", AzureRepositoryF.class.getSimpleName());
-
- // Example for azure repo settings
- // settings.put("cloud.azure.storage.my_account1.account", "account_name");
- // settings.put("cloud.azure.storage.my_account1.key", "account_key");
- // settings.put("cloud.azure.storage.my_account1.default", true);
- // settings.put("cloud.azure.storage.my_account2.account", "account_name");
- // settings.put("cloud.azure.storage.my_account2.key", "account_key_secondary");
-
- final CountDownLatch latch = new CountDownLatch(1);
- final Node node = new MockNode(settings.build(), Collections.singletonList(AzureRepositoryPlugin.class));
- Runtime.getRuntime().addShutdownHook(new Thread() {
- @Override
- public void run() {
- try {
- IOUtils.close(node);
- } catch (IOException e) {
- throw new ElasticsearchException(e);
- } finally {
- latch.countDown();
- }
- }
- });
- node.start();
- latch.await();
- }
-}
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java
index 01b26bad343d5..26b02278eddc0 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java
@@ -47,7 +47,6 @@ private AzureRepository azureRepository(Settings settings) throws StorageExcepti
TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null);
}
-
public void testReadonlyDefault() throws StorageException, IOException, URISyntaxException {
assertThat(azureRepository(Settings.EMPTY).isReadOnly(), is(false));
}
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java
new file mode 100644
index 0000000000000..ebd8241e710ea
--- /dev/null
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.azure;
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.mocksocket.MockHttpServer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+
+/**
+ * {@link AzureStorageFixture} is a fixture that emulates an Azure Storage service.
+ *
+ * It starts an asynchronous socket server that binds to a random local port. The server parses
+ * HTTP requests and uses a {@link AzureStorageTestServer} to handle them before returning
+ * them to the client as HTTP responses.
+ */
+public class AzureStorageFixture {
+
+ public static void main(String[] args) throws Exception {
+ if (args == null || args.length != 2) {
+ throw new IllegalArgumentException("AzureStorageFixture <working directory> <container>");
+ }
+
+ final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+ final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+
+ try {
+ final Path workingDirectory = workingDir(args[0]);
+ // Writes the PID of the current Java process in a `pid` file located in the working directory
+ writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+
+ final String addressAndPort = addressToString(httpServer.getAddress());
+ // Writes the address and port of the http server in a `ports` file located in the working directory
+ writeFile(workingDirectory, "ports", addressAndPort);
+
+ // Emulates Azure
+ final String storageUrl = "http://" + addressAndPort;
+ final AzureStorageTestServer testServer = new AzureStorageTestServer(storageUrl);
+ testServer.createContainer(args[1]);
+
+ httpServer.createContext("/", new ResponseHandler(testServer));
+ httpServer.start();
+
+ // Wait to be killed
+ Thread.sleep(Long.MAX_VALUE);
+
+ } finally {
+ httpServer.stop(0);
+ }
+ }
+
+ @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
+ private static Path workingDir(final String dir) {
+ return Paths.get(dir);
+ }
+
+ private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
+ final Path tempPidFile = Files.createTempFile(dir, null, null);
+ Files.write(tempPidFile, singleton(content));
+ Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+ }
+
+ private static String addressToString(final SocketAddress address) {
+ final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
+ if (inetSocketAddress.getAddress() instanceof Inet6Address) {
+ return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+ } else {
+ return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+ }
+ }
+
+ static class ResponseHandler implements HttpHandler {
+
+ private final AzureStorageTestServer server;
+
+ private ResponseHandler(final AzureStorageTestServer server) {
+ this.server = server;
+ }
+
+ @Override
+ public void handle(HttpExchange exchange) throws IOException {
+ String method = exchange.getRequestMethod();
+ String path = server.getEndpoint() + exchange.getRequestURI().getRawPath();
+ String query = exchange.getRequestURI().getRawQuery();
+ Map<String, List<String>> headers = exchange.getRequestHeaders();
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ Streams.copy(exchange.getRequestBody(), out);
+
+ final AzureStorageTestServer.Response response = server.handle(method, path, query, headers, out.toByteArray());
+
+ Map<String, List<String>> responseHeaders = exchange.getResponseHeaders();
+ responseHeaders.put("Content-Type", singletonList(response.contentType));
+ response.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
+ exchange.sendResponseHeaders(response.status.getStatus(), response.body.length);
+ if (response.body.length > 0) {
+ exchange.getResponseBody().write(response.body);
+ }
+ exchange.close();
+ }
+ }
+}
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
index 68b84594d62ca..ce6d51b364339 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
@@ -66,6 +66,8 @@ public void createContainer(String account, LocationMode mode, String container)
@Override
public void deleteFiles(String account, LocationMode mode, String container, String path) {
+ final Map<String, BlobMetaData> blobs = listBlobsByPrefix(account, mode, container, path, null);
+ blobs.keySet().forEach(key -> deleteBlob(account, mode, container, key));
}
@Override
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
new file mode 100644
index 0000000000000..584428f9a45b0
--- /dev/null
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.azure;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
+
+/**
+ * {@link AzureStorageTestServer} emulates an Azure Storage service through a {@link #handle(String, String, String, Map, byte[])}
+ * method that provides appropriate responses for specific requests like the real Azure platform would do.
+ * It is based on official documentation available at https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api.
+ */
+public class AzureStorageTestServer {
+
+ private static byte[] EMPTY_BYTE = new byte[0];
+
+ /** List of the containers stored on this test server **/
+ private final Map<String, Container> containers = ConcurrentCollections.newConcurrentMap();
+
+ /** Request handlers for the requests made by the Azure client **/
+ private final PathTrie<RequestHandler> handlers;
+
+ /** Server endpoint **/
+ private final String endpoint;
+
+ /** Increments for the requests ids **/
+ private final AtomicLong requests = new AtomicLong(0);
+
+ /**
+ * Creates a {@link AzureStorageTestServer} with a custom endpoint
+ */
+ AzureStorageTestServer(final String endpoint) {
+ this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
+ this.handlers = defaultHandlers(endpoint, containers);
+ }
+
+ /** Creates a container in the test server **/
+ void createContainer(final String containerName) {
+ containers.put(containerName, new Container(containerName));
+ }
+
+ public String getEndpoint() {
+ return endpoint;
+ }
+
+ /**
+ * Returns a response for the given request
+ *
+ * @param method the HTTP method of the request
+ * @param path the path of the URL of the request
+ * @param query the queryString of the URL of request
+ * @param headers the HTTP headers of the request
+ * @param body the HTTP request body
+ * @return a {@link Response}
+ * @throws IOException if something goes wrong
+ */
+ public Response handle(final String method,
+ final String path,
+ final String query,
+ final Map<String, List<String>> headers,
+ byte[] body) throws IOException {
+
+ final long requestId = requests.incrementAndGet();
+
+ final Map<String, String> params = new HashMap<>();
+ if (query != null) {
+ RestUtils.decodeQueryString(query, 0, params);
+ }
+
+ final RequestHandler handler = handlers.retrieve(method + " " + path, params);
+ if (handler != null) {
+ return handler.execute(params, headers, body, requestId);
+ } else {
+ return newInternalError(requestId);
+ }
+ }
+
+ @FunctionalInterface
+ interface RequestHandler {
+
+ /**
+ * Simulates the execution of a Azure Storage request and returns a corresponding response.
+ *
+ * @param params the request's query string parameters
+ * @param headers the request's headers
+ * @param body the request body provided as a byte array
+ * @param requestId a unique id for the incoming request
+ * @return the corresponding response
+ *
+ * @throws IOException if something goes wrong
+ */
+ Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body, long requestId) throws IOException;
+ }
+
+ /** Builds the default request handlers **/
+ private static PathTrie defaultHandlers(final String endpoint, final Map containers) {
+ final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER);
+
+ // Get Blob Properties
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
+ objectsPaths("HEAD " + endpoint + "/{container}").forEach(path ->
+ handlers.insert(path, (params, headers, body, requestId) -> {
+ final String containerName = params.get("container");
+
+ final Container container =containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(requestId);
+ }
+
+ final String blobName = objectName(params);
+ for (Map.Entry<String, byte[]> object : container.objects.entrySet()) {
+ if (object.getKey().equals(blobName)) {
+ Map<String, String> responseHeaders = new HashMap<>();
+ responseHeaders.put("x-ms-blob-content-length", String.valueOf(object.getValue().length));
+ responseHeaders.put("x-ms-blob-type", "blockblob");
+ return new Response(RestStatus.OK, responseHeaders, "text/plain", EMPTY_BYTE);
+ }
+ }
+ return newBlobNotFoundError(requestId);
+ })
+ );
+
+ // PUT Blob
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob
+ objectsPaths("PUT " + endpoint + "/{container}").forEach(path ->
+ handlers.insert(path, (params, headers, body, requestId) -> {
+ final String destContainerName = params.get("container");
+
+ final Container destContainer =containers.get(destContainerName);
+ if (destContainer == null) {
+ return newContainerNotFoundError(requestId);
+ }
+
+ final String destBlobName = objectName(params);
+
+ // Request is a copy request
+ List<String> headerCopySource = headers.getOrDefault("x-ms-copy-source", emptyList());
+ if (headerCopySource.isEmpty() == false) {
+ String srcBlobName = headerCopySource.get(0);
+
+ Container srcContainer = null;
+ for (Container container : containers.values()) {
+ String prefix = endpoint + "/" + container.name + "/";
+ if (srcBlobName.startsWith(prefix)) {
+ srcBlobName = srcBlobName.replaceFirst(prefix, "");
+ srcContainer = container;
+ break;
+ }
+ }
+
+ if (srcContainer == null || srcContainer.objects.containsKey(srcBlobName) == false) {
+ return newBlobNotFoundError(requestId);
+ }
+
+ byte[] bytes = srcContainer.objects.get(srcBlobName);
+ if (bytes != null) {
+ destContainer.objects.put(destBlobName, bytes);
+ return new Response(RestStatus.ACCEPTED, singletonMap("x-ms-copy-status", "success"), "text/plain", EMPTY_BYTE);
+ } else {
+ return newBlobNotFoundError(requestId);
+ }
+ } else {
+ destContainer.objects.put(destBlobName, body);
+ }
+
+ return new Response(RestStatus.CREATED, emptyMap(), "text/plain", EMPTY_BYTE);
+ })
+ );
+
+ // Get Blob
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob
+ objectsPaths("GET " + endpoint + "/{container}").forEach(path ->
+ handlers.insert(path, (params, headers, body, requestId) -> {
+ final String containerName = params.get("container");
+
+ final Container container =containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(requestId);
+ }
+
+ final String blobName = objectName(params);
+ if (container.objects.containsKey(blobName)) {
+ Map<String, String> responseHeaders = new HashMap<>();
+ responseHeaders.put("x-ms-copy-status", "success");
+ responseHeaders.put("x-ms-blob-type", "blockblob");
+ return new Response(RestStatus.OK, responseHeaders, "application/octet-stream", container.objects.get(blobName));
+
+ }
+ return newBlobNotFoundError(requestId);
+ })
+ );
+
+ // Delete Blob
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
+ objectsPaths("DELETE " + endpoint + "/{container}").forEach(path ->
+ handlers.insert(path, (params, headers, body, requestId) -> {
+ final String containerName = params.get("container");
+
+ final Container container =containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(requestId);
+ }
+
+ final String blobName = objectName(params);
+ if (container.objects.remove(blobName) != null) {
+ return new Response(RestStatus.ACCEPTED, emptyMap(), "text/plain", EMPTY_BYTE);
+ }
+ return newBlobNotFoundError(requestId);
+ })
+ );
+
+ // List Blobs
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs
+ handlers.insert("GET " + endpoint + "/{container}/", (params, headers, body, requestId) -> {
+ final String containerName = params.get("container");
+
+ final Container container =containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(requestId);
+ }
+
+ final String prefix = params.get("prefix");
+ return newEnumerationResultsResponse(requestId, container, prefix);
+ });
+
+ // Get Container Properties
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
+ handlers.insert("HEAD " + endpoint + "/{container}", (params, headers, body, requestId) -> {
+ String container = params.get("container");
+ if (Strings.hasText(container) && containers.containsKey(container)) {
+ return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+ } else {
+ return newContainerNotFoundError(requestId);
+ }
+ });
+
+ return handlers;
+ }
+
+ /**
+ * Represents a Azure Storage container.
+ */
+ static class Container {
+
+ /** Container name **/
+ final String name;
+
+ /** Blobs contained in the container **/
+ final Map<String, byte[]> objects;
+
+ Container(final String name) {
+ this.name = Objects.requireNonNull(name);
+ this.objects = ConcurrentCollections.newConcurrentMap();
+ }
+ }
+
+ /**
+ * Represents a HTTP Response.
+ */
+ static class Response {
+
+ final RestStatus status;
+ final Map<String, String> headers;
+ final String contentType;
+ final byte[] body;
+
+ Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) {
+ this.status = Objects.requireNonNull(status);
+ this.headers = Objects.requireNonNull(headers);
+ this.contentType = Objects.requireNonNull(contentType);
+ this.body = Objects.requireNonNull(body);
+ }
+ }
+
+ /**
+ * Decline a path like "http://host:port/{bucket}" into 10 derived paths like:
+ * - http://host:port/{bucket}/{path0}
+ * - http://host:port/{bucket}/{path0}/{path1}
+ * - http://host:port/{bucket}/{path0}/{path1}/{path2}
+ * - etc
+ */
+ private static List<String> objectsPaths(final String path) {
+ final List<String> paths = new ArrayList<>();
+ String p = path;
+ for (int i = 0; i < 10; i++) {
+ p = p + "/{path" + i + "}";
+ paths.add(p);
+ }
+ return paths;
+ }
+
+ /**
+ * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
+ *
+ * This is the counterpart of {@link #objectsPaths(String)}
+ */
+ private static String objectName(final Map<String, String> params) {
+ final StringBuilder name = new StringBuilder();
+ for (int i = 0; i < 10; i++) {
+ String value = params.getOrDefault("path" + i, null);
+ if (value != null) {
+ if (name.length() > 0) {
+ name.append('/');
+ }
+ name.append(value);
+ }
+ }
+ return name.toString();
+ }
+
+
+ /**
+ * Azure EnumerationResults Response
+ */
+ private static Response newEnumerationResultsResponse(final long requestId, final Container container, final String prefix) {
+ final String id = Long.toString(requestId);
+ final StringBuilder response = new StringBuilder();
+ response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+ response.append("<EnumerationResults>");
+ if (prefix != null) {
+ response.append("<Prefix>").append(prefix).append("</Prefix>");
+ } else {
+ response.append("<Prefix/>");
+ }
+ response.append("<MaxResults>").append(container.objects.size()).append("</MaxResults>");
+ response.append("<Blobs>");
+
+ int count = 0;
+ for (Map.Entry object : container.objects.entrySet()) {
+ String objectName = object.getKey();
+ if (prefix == null || objectName.startsWith(prefix)) {
+ response.append("<Blob>");
+ response.append("<Name>").append(objectName).append("</Name>");
+ response.append("<Properties>");
+ response.append("<Content-Length>").append(object.getValue().length).append("</Content-Length>");
+ response.append("<CopyId>").append(count++).append("</CopyId>");
+ response.append("<CopyStatus>success</CopyStatus>");
+ response.append("<BlobType>BlockBlob</BlobType>");
+ response.append("</Properties>");
+ response.append("</Blob>");
+ }
+ }
+
+ response.append("</Blobs>");
+ response.append("<NextMarker/>");
+ response.append("</EnumerationResults>");
+
+ return new Response(RestStatus.OK, singletonMap("x-ms-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+ }
+
+ private static Response newContainerNotFoundError(final long requestId) {
+ return newError(requestId, RestStatus.NOT_FOUND, "ContainerNotFound", "The specified container does not exist");
+ }
+
+ private static Response newBlobNotFoundError(final long requestId) {
+ return newError(requestId, RestStatus.NOT_FOUND, "BlobNotFound", "The specified blob does not exist");
+ }
+
+ private static Response newInternalError(final long requestId) {
+ return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "The server encountered an internal error");
+ }
+
+ /**
+ * Azure Error
+ *
+ * https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+ */
+ private static Response newError(final long requestId,
+ final RestStatus status,
+ final String code,
+ final String message) {
+
+ final StringBuilder response = new StringBuilder();
+ response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+ response.append("<Error>");
+ response.append("<Code>").append(code).append("</Code>");
+ response.append("<Message>").append(message).append("</Message>");
+ response.append("</Error>");
+
+ final Map headers = new HashMap<>(2);
+ headers.put("x-ms-request-id", String.valueOf(requestId));
+ headers.put("x-ms-error-code", code);
+
+ return new Response(status, headers, "application/xml", response.toString().getBytes(UTF_8));
+ }
+}
diff --git a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
index fb929f1e822ff..25726fa8f9b96 100644
--- a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
+++ b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
@@ -1,6 +1,6 @@
-# Integration tests for Azure Repository component
+# Integration tests for repository-azure
#
-"Azure Repository loaded":
+"Plugin repository-azure is loaded":
- do:
cluster.state: {}
@@ -11,3 +11,177 @@
nodes.info: {}
- match: { nodes.$master.plugins.0.name: repository-azure }
+---
+"Snapshot/Restore with repository-azure":
+
+ # Register repository
+ - do:
+ snapshot.create_repository:
+ repository: repository
+ body:
+ type: azure
+ settings:
+ container: "container_test"
+ client: "integration_test"
+
+ - match: { acknowledged: true }
+
+ # Get repository
+ - do:
+ snapshot.get_repository:
+ repository: repository
+
+ - match: {repository.settings.container : "container_test"}
+ - match: {repository.settings.client : "integration_test"}
+
+ # Index documents
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - index:
+ _index: docs
+ _type: doc
+ _id: 1
+ - snapshot: one
+ - index:
+ _index: docs
+ _type: doc
+ _id: 2
+ - snapshot: one
+ - index:
+ _index: docs
+ _type: doc
+ _id: 3
+ - snapshot: one
+
+ - do:
+ count:
+ index: docs
+
+ - match: {count: 3}
+
+ # Create a first snapshot
+ - do:
+ snapshot.create:
+ repository: repository
+ snapshot: snapshot-one
+ wait_for_completion: true
+
+ - match: { snapshot.snapshot: snapshot-one }
+ - match: { snapshot.state : SUCCESS }
+ - match: { snapshot.include_global_state: true }
+ - match: { snapshot.shards.failed : 0 }
+
+ - do:
+ snapshot.status:
+ repository: repository
+ snapshot: snapshot-one
+
+ - is_true: snapshots
+ - match: { snapshots.0.snapshot: snapshot-one }
+ - match: { snapshots.0.state : SUCCESS }
+
+ # Index more documents
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - index:
+ _index: docs
+ _type: doc
+ _id: 4
+ - snapshot: two
+ - index:
+ _index: docs
+ _type: doc
+ _id: 5
+ - snapshot: two
+ - index:
+ _index: docs
+ _type: doc
+ _id: 6
+ - snapshot: two
+ - index:
+ _index: docs
+ _type: doc
+ _id: 7
+ - snapshot: two
+
+ - do:
+ count:
+ index: docs
+
+ - match: {count: 7}
+
+ # Create a second snapshot
+ - do:
+ snapshot.create:
+ repository: repository
+ snapshot: snapshot-two
+ wait_for_completion: true
+
+ - match: { snapshot.snapshot: snapshot-two }
+ - match: { snapshot.state : SUCCESS }
+ - match: { snapshot.shards.failed : 0 }
+
+ - do:
+ snapshot.get:
+ repository: repository
+ snapshot: snapshot-one,snapshot-two
+
+ - is_true: snapshots
+ - match: { snapshots.0.state : SUCCESS }
+ - match: { snapshots.1.state : SUCCESS }
+
+ # Delete the index
+ - do:
+ indices.delete:
+ index: docs
+
+ # Restore the second snapshot
+ - do:
+ snapshot.restore:
+ repository: repository
+ snapshot: snapshot-two
+ wait_for_completion: true
+
+ - do:
+ count:
+ index: docs
+
+ - match: {count: 7}
+
+ # Delete the index again
+ - do:
+ indices.delete:
+ index: docs
+
+ # Restore the first snapshot
+ - do:
+ snapshot.restore:
+ repository: repository
+ snapshot: snapshot-one
+ wait_for_completion: true
+
+ - do:
+ count:
+ index: docs
+
+ - match: {count: 3}
+
+ # Remove the snapshots
+ - do:
+ snapshot.delete:
+ repository: repository
+ snapshot: snapshot-two
+
+ - do:
+ snapshot.delete:
+ repository: repository
+ snapshot: snapshot-one
+
+ # Remove our repository
+ - do:
+ snapshot.delete_repository:
+ repository: repository