@@ -21,6 +21,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -143,6 +144,7 @@
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.gateway.MetaStateService;
 import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
 import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
@@ -211,6 +213,7 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
 
@@ -503,7 +506,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() {
         }
     }
 
-    public void testConcurrentSnapshotDeleteAndDeleteIndex() {
+    public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException {
         setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
 
         String repoName = "repo";
@@ -514,11 +517,13 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
             testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
 
         final StepListener<Collection<CreateIndexResponse>> createIndicesListener = new StepListener<>();
+        final int indices = randomIntBetween(5, 20);
 
+        final SetOnce<Index> firstIndex = new SetOnce<>();
         continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> {
+            firstIndex.set(masterNode.clusterService.state().metaData().index(index).getIndex());
             // create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot
             // finalization
-            final int indices = randomIntBetween(5, 20);
             final GroupedActionListener<CreateIndexResponse> listener = new GroupedActionListener<>(createIndicesListener, indices);
             for (int i = 0; i < indices; ++i) {
                 client().admin().indices().create(new CreateIndexRequest("index-" + i), listener);
@@ -527,23 +532,54 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
 
         final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
 
+        final boolean partialSnapshot = randomBoolean();
+
         continueOrDie(createIndicesListener, createIndexResponses ->
             client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(false)
-                .execute(createSnapshotResponseStepListener));
+                .setPartial(partialSnapshot).execute(createSnapshotResponseStepListener));
 
         continueOrDie(createSnapshotResponseStepListener,
-            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), noopListener()));
+            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), new ActionListener<>() {
+                @Override
+                public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                    if (partialSnapshot) {
+                        // Recreate index by the same name to test that we don't snapshot conflicting metadata in this scenario
+                        client().admin().indices().create(new CreateIndexRequest(index), noopListener());
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    if (partialSnapshot) {
+                        throw new AssertionError("Delete index should always work during partial snapshots", e);
+                    }
+                }
+            }));
 
         deterministicTaskQueue.runAllRunnableTasks();
 
         SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
         assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
         final Repository repository = masterNode.repositoriesService.repository(repoName);
-        Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
+        final RepositoryData repositoryData = getRepositoryData(repository);
+        Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
         assertThat(snapshotIds, hasSize(1));
 
         final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
         assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+        if (partialSnapshot) {
+            // Single shard for each index so we either get all indices or all except for the deleted index
+            assertThat(snapshotInfo.successfulShards(), either(is(indices + 1)).or(is(indices)));
+            if (snapshotInfo.successfulShards() == indices + 1) {
+                final IndexMetaData indexMetaData =
+                    repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), repositoryData.resolveIndexId(index));
+                // Make sure we snapshotted the metadata of this index and not the recreated version
+                assertEquals(indexMetaData.getIndex(), firstIndex.get());
+            }
+        } else {
+            // Index delete must be blocked for non-partial snapshots and we get a snapshot for every index
+            assertEquals(snapshotInfo.successfulShards(), indices + 1);
+        }
         assertEquals(0, snapshotInfo.failedShards());
     }
 