diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index 67b5a7bd604ba..6bc1ce2882c92 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -145,14 +145,22 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate
     void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory target, final Sort indexSort,
                     final Directory[] sources, final long maxSeqNo, final long maxUnsafeAutoIdTimestamp,
                     IndexMetaData indexMetaData, int shardId, boolean split, boolean hasNested) throws IOException {
+
+        // clean target directory (if previous recovery attempt failed) and create a fresh segment file with the proper lucene version
+        Lucene.cleanLuceneIndex(target);
+        assert sources.length > 0;
+        final int luceneIndexCreatedVersionMajor = Lucene.readSegmentInfos(sources[0]).getIndexCreatedVersionMajor();
+        new SegmentInfos(luceneIndexCreatedVersionMajor).commit(target);
+
         final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
+
         IndexWriterConfig iwc = new IndexWriterConfig(null)
             .setCommitOnClose(false)
             // we don't want merges to happen here - we call maybe merge on the engine
             // later once we stared it up otherwise we would need to wait for it here
             // we also don't specify a codec here and merges should use the engines for this index
             .setMergePolicy(NoMergePolicy.INSTANCE)
-            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+            .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
         if (indexSort != null) {
             iwc.setIndexSort(indexSort);
         }
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 5c61cacd4e909..ec755cda6b8d1 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -423,6 +423,73 @@ public void testShrink() throws IOException {
         assertEquals(numDocs, totalHits);
     }

+    public void testShrinkAfterUpgrade() throws IOException {
+        String shrunkenIndex = index + "_shrunk";
+        int numDocs;
+        if (runningAgainstOldCluster) {
+            XContentBuilder mappingsAndSettings = jsonBuilder();
+            mappingsAndSettings.startObject();
+            {
+                mappingsAndSettings.startObject("mappings");
+                mappingsAndSettings.startObject("doc");
+                mappingsAndSettings.startObject("properties");
+                {
+                    mappingsAndSettings.startObject("field");
+                    mappingsAndSettings.field("type", "text");
+                    mappingsAndSettings.endObject();
+                }
+                mappingsAndSettings.endObject();
+                mappingsAndSettings.endObject();
+                mappingsAndSettings.endObject();
+            }
+            mappingsAndSettings.endObject();
+            client().performRequest("PUT", "/" + index, Collections.emptyMap(),
+                new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON));
+
+            numDocs = randomIntBetween(512, 1024);
+            indexRandomDocuments(numDocs, true, true, i -> {
+                return JsonXContent.contentBuilder().startObject()
+                    .field("field", "value")
+                    .endObject();
+            });
+        } else {
+            String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}";
+            Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(),
+                new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON));
+            assertEquals(200, rsp.getStatusLine().getStatusCode());
+
+            String shrinkIndexRequestBody = "{\"settings\": {\"index.number_of_shards\": 1}}";
+            rsp = client().performRequest("PUT", "/" + index + "/_shrink/" + shrunkenIndex, Collections.emptyMap(),
+                new StringEntity(shrinkIndexRequestBody, ContentType.APPLICATION_JSON));
+            assertEquals(200, rsp.getStatusLine().getStatusCode());
+
+            numDocs = countOfIndexedRandomDocuments();
+        }
+
+        Response rsp = client().performRequest("POST", "/_refresh");
+        assertEquals(200, rsp.getStatusLine().getStatusCode());
+
+        Map response = toMap(client().performRequest("GET", "/" + index + "/_search"));
+        assertNoFailures(response);
+        int totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
+        assertThat(totalShards, greaterThan(1));
+        int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response);
+        assertEquals(totalShards, successfulShards);
+        int totalHits = (int) XContentMapValues.extractValue("hits.total", response);
+        assertEquals(numDocs, totalHits);
+
+        if (runningAgainstOldCluster == false) {
+            response = toMap(client().performRequest("GET", "/" + shrunkenIndex + "/_search"));
+            assertNoFailures(response);
+            totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
+            assertEquals(1, totalShards);
+            successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response);
+            assertEquals(1, successfulShards);
+            totalHits = (int) XContentMapValues.extractValue("hits.total", response);
+            assertEquals(numDocs, totalHits);
+        }
+    }
+
     void assertBasicSearchWorks(int count) throws IOException {
         logger.info("--> testing basic search");
         Map response = toMap(client().performRequest("GET", "/" + index + "/_search"));
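
For readers who want the core of the StoreRecovery change outside the diff context: the fix pre-seeds the (cleaned) shrink target with a commit that records the source index's created-version-major, and then opens the IndexWriter in APPEND mode so that seeded commit, and with it the original Lucene version, survives when the source segments are added. The following is a minimal standalone sketch, not the Elasticsearch code itself; the class and method names (ShrinkSeedSketch, seedAndAppend) are invented for illustration, it assumes the target directory starts out empty (the actual change first calls Lucene.cleanLuceneIndex), and it uses only the Lucene 7.x calls that appear in the diff above plus SegmentInfos.readLatestCommit in place of Elasticsearch's Lucene.readSegmentInfos helper.

import java.io.IOException;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;

// Illustrative sketch only: mirrors the technique in the StoreRecovery hunk above.
class ShrinkSeedSketch {
    static void seedAndAppend(Directory source, Directory target) throws IOException {
        // Read which Lucene major version originally created the source index (e.g. 5 for a 5.x index).
        int createdVersionMajor = SegmentInfos.readLatestCommit(source).getIndexCreatedVersionMajor();
        // Write an empty commit into the target that carries that created-version-major.
        new SegmentInfos(createdVersionMajor).commit(target);
        IndexWriterConfig iwc = new IndexWriterConfig(null)
            .setCommitOnClose(false)
            .setMergePolicy(NoMergePolicy.INSTANCE)
            // APPEND keeps the seeded commit; CREATE would overwrite it and drop the version information.
            .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
        // Hard-link the source files into the target where the filesystem supports it, copy otherwise.
        try (IndexWriter writer = new IndexWriter(new HardlinkCopyDirectoryWrapper(target), iwc)) {
            writer.addIndexes(source);
            writer.commit();
        }
    }
}

The APPEND open mode is the essential part of the sketch: a writer opened with CREATE writes a brand-new segments file, which is why the original change pairs the seeded commit with the CREATE-to-APPEND switch that testShrinkAfterUpgrade exercises across a full-cluster restart.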