
Commit 00228cc

Handle no such remote cluster exception in ccr (#53415)
A remote client can throw a NoSuchRemoteClusterException while fetching the cluster state from the leader cluster. We also need to handle that exception when retrying to add a retention lease to the leader shard.

Closes #53225
1 parent: 0bc95b0

1 file changed (+24, -22)

x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java

Lines changed: 24 additions & 22 deletions
@@ -136,16 +136,7 @@ protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer h
                 final Index followerIndex = params.getFollowShardId().getIndex();
                 final Index leaderIndex = params.getLeaderShardId().getIndex();
                 final Supplier<TimeValue> timeout = () -> isStopped() ? TimeValue.MINUS_ONE : waitForMetadataTimeOut;
-
-                final Client remoteClient;
-                try {
-                    remoteClient = remoteClient(params);
-                } catch (NoSuchRemoteClusterException e) {
-                    errorHandler.accept(e);
-                    return;
-                }
-
-                CcrRequests.getIndexMetadata(remoteClient, leaderIndex, minRequiredMappingVersion, 0L, timeout, ActionListener.wrap(
+                final ActionListener<IndexMetaData> listener = ActionListener.wrap(
                     indexMetaData -> {
                         if (indexMetaData.getMappings().isEmpty()) {
                             assert indexMetaData.getMappingVersion() == 1;
@@ -161,7 +152,12 @@ protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer h
                             errorHandler));
                     },
                     errorHandler
-                ));
+                );
+                try {
+                    CcrRequests.getIndexMetadata(remoteClient(params), leaderIndex, minRequiredMappingVersion, 0L, timeout, listener);
+                } catch (NoSuchRemoteClusterException e) {
+                    errorHandler.accept(e);
+                }
             }

             @Override
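
To make the shape of this first change easier to see outside the Elasticsearch codebase, here is a minimal, self-contained sketch of the pattern: construct the listener up front, then invoke the remote call inside try/catch so that a synchronous NoSuchRemoteClusterException takes the same error path as an asynchronous failure. Every type in it (RemoteClient, remoteClient, fetchIndexMetadata) is a hypothetical stand-in, not the real CCR API.

import java.util.function.Consumer;

// Sketch of the commit's pattern with simplified stand-in types: route a
// synchronous NoSuchRemoteClusterException into the same error handler
// that receives asynchronous failures.
public class RemoteFetchSketch {

    // Stand-in for org.elasticsearch.transport.NoSuchRemoteClusterException.
    static class NoSuchRemoteClusterException extends RuntimeException {
        NoSuchRemoteClusterException(String clusterAlias) {
            super("no such remote cluster: [" + clusterAlias + "]");
        }
    }

    interface RemoteClient {
        void getIndexMetadata(String index, Consumer<String> onResponse, Consumer<Exception> onFailure);
    }

    // Resolving the remote client can itself throw, e.g. when the remote
    // cluster has been removed from the cluster settings.
    static RemoteClient remoteClient(String clusterAlias) {
        throw new NoSuchRemoteClusterException(clusterAlias);
    }

    static void fetchIndexMetadata(String clusterAlias, String index,
                                   Consumer<String> handler, Consumer<Exception> errorHandler) {
        // Before the commit, only the client lookup was guarded (with an early
        // return); after it, the call itself sits inside the try block, so both
        // the lookup and the invocation funnel into errorHandler.
        try {
            remoteClient(clusterAlias).getIndexMetadata(index, handler, errorHandler);
        } catch (NoSuchRemoteClusterException e) {
            errorHandler.accept(e); // same path as an async failure
        }
    }

    public static void main(String[] args) {
        fetchIndexMetadata("leader", "logs-1",
            metadata -> System.out.println("got metadata: " + metadata),
            e -> System.out.println("handled: " + e.getMessage()));
    }
}

The design point is that callers only ever observe failures through errorHandler, whether the remote cluster disappeared before or during the call.
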
@@ -311,21 +307,27 @@ protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final Lo
                                 "{} background adding retention lease [{}] while following",
                                 params.getFollowShardId(),
                                 retentionLeaseId);
-                            CcrRetentionLeases.asyncAddRetentionLease(
+                            try {
+                                final ActionListener<RetentionLeaseActions.Response> wrappedListener = ActionListener.wrap(
+                                    r -> {},
+                                    inner -> {
+                                        /*
+                                         * If this fails that the retention lease already exists, something highly unusual is
+                                         * going on. Log it, and renew again after another renew interval has passed.
+                                         */
+                                        final Throwable innerCause = ExceptionsHelper.unwrapCause(inner);
+                                        logRetentionLeaseFailure(retentionLeaseId, innerCause);
+                                    });
+                                CcrRetentionLeases.asyncAddRetentionLease(
                                 params.getLeaderShardId(),
                                 retentionLeaseId,
                                 followerGlobalCheckpoint.getAsLong(),
                                 remoteClient(params),
-                                ActionListener.wrap(
-                                    r -> {},
-                                    inner -> {
-                                        /*
-                                         * If this fails that the retention lease already exists, something highly unusual is
-                                         * going on. Log it, and renew again after another renew interval has passed.
-                                         */
-                                        final Throwable innerCause = ExceptionsHelper.unwrapCause(inner);
-                                        logRetentionLeaseFailure(retentionLeaseId, innerCause);
-                                    }));
+                                wrappedListener);
+                            } catch (NoSuchRemoteClusterException rce) {
+                                // we will attempt to renew again after another renew interval has passed
+                                logRetentionLeaseFailure(retentionLeaseId, rce);
+                            }
                         } else {
                             // if something else happened, we will attempt to renew again after another renew interval has passed
                         }
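
The retention-lease half of the change leans on the fact that renewal already runs on a fixed schedule, so the exception can be logged and swallowed; the next scheduled run is the retry. A minimal sketch of that pattern under the same caveat, with scheduleRenewal and addRetentionLease as hypothetical stand-ins for the CcrRetentionLeases API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch: renewal runs on a fixed schedule, so NoSuchRemoteClusterException
// is logged and swallowed rather than propagated; the next run retries.
public class RenewalSketch {

    static class NoSuchRemoteClusterException extends RuntimeException {
        NoSuchRemoteClusterException(String message) {
            super(message);
        }
    }

    // Stand-in for CcrRetentionLeases.asyncAddRetentionLease(...); it fails
    // synchronously here to exercise the catch branch.
    static void addRetentionLease(String leaseId) {
        throw new NoSuchRemoteClusterException("leader cluster is not connected");
    }

    static void scheduleRenewal(ScheduledExecutorService scheduler, String leaseId, long intervalMillis) {
        scheduler.scheduleWithFixedDelay(() -> {
            try {
                addRetentionLease(leaseId);
            } catch (NoSuchRemoteClusterException e) {
                // we will attempt to renew again after another renew interval has passed
                System.out.println("retention lease [" + leaseId + "] failed: " + e.getMessage());
            }
        }, 0, intervalMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduleRenewal(scheduler, "follower-retention-lease", 200);
        Thread.sleep(500); // let a couple of renewal attempts run
        scheduler.shutdownNow();
    }
}
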
