@@ -178,7 +178,7 @@ static class ChunksCoordinator {
     private final LongConsumer processedGlobalCheckpointUpdater;
 
     private final AtomicInteger activeWorkers;
-    private final AtomicLong lastPolledGlobalCheckpoint;
+    private final AtomicLong lastProcessedGlobalCheckpoint;
     private final Queue<long[]> chunks = new ConcurrentLinkedQueue<>();
 
     ChunksCoordinator(Client followerClient,
@@ -207,7 +207,7 @@ static class ChunksCoordinator {
         this.stateSupplier = runningSuppler;
         this.processedGlobalCheckpointUpdater = processedGlobalCheckpointUpdater;
         this.activeWorkers = new AtomicInteger();
-        this.lastPolledGlobalCheckpoint = new AtomicLong();
+        this.lastProcessedGlobalCheckpoint = new AtomicLong();
     }
 
     void createChucks(long from, long to) {
@@ -218,34 +218,34 @@ void createChucks(long from, long to) {
         }
     }
 
-    void updateChunksQueue() {
+    void updateChunksQueue(long previousGlobalCheckpoint) {
         schedule(CHECK_LEADER_GLOBAL_CHECKPOINT_INTERVAL, () -> {
             if (stateSupplier.get() == false) {
+                chunks.clear();
                 return;
             }
 
-            fetchGlobalCheckpoint(leaderClient, leaderShard, leaderGlobalCheckPoint -> {
-                long followerGlobalCheckpoint = lastPolledGlobalCheckpoint.get();
-                if (leaderGlobalCheckPoint != followerGlobalCheckpoint) {
-                    assert followerGlobalCheckpoint < leaderGlobalCheckPoint : "followGlobalCheckPoint [" + followerGlobalCheckpoint +
-                        "] is not below leaderGlobalCheckPoint [" + leaderGlobalCheckPoint + "]";
-                    createChucks(lastPolledGlobalCheckpoint.get(), leaderGlobalCheckPoint);
+            fetchGlobalCheckpoint(leaderClient, leaderShard, currentGlobalCheckPoint -> {
+                if (currentGlobalCheckPoint != previousGlobalCheckpoint) {
+                    assert previousGlobalCheckpoint < currentGlobalCheckPoint : "previousGlobalCheckpoint [" + previousGlobalCheckpoint +
+                        "] is not below currentGlobalCheckPoint [" + currentGlobalCheckPoint + "]";
+                    createChucks(previousGlobalCheckpoint, currentGlobalCheckPoint);
                     initiateChunkWorkers();
+                    updateChunksQueue(currentGlobalCheckPoint);
                 } else {
                     LOGGER.debug("{} no write operations to fetch", followerShard);
+                    updateChunksQueue(previousGlobalCheckpoint);
                 }
-                updateChunksQueue();
             }, failureHandler);
         });
     }
 
     void start(long followerGlobalCheckpoint, long leaderGlobalCheckPoint) {
         createChucks(followerGlobalCheckpoint, leaderGlobalCheckPoint);
-        lastPolledGlobalCheckpoint.set(leaderGlobalCheckPoint);
         LOGGER.debug("{} Start coordination of [{}] chunks with [{}] concurrent processors",
             leaderShard, chunks.size(), maxConcurrentWorker);
         initiateChunkWorkers();
-        updateChunksQueue();
+        updateChunksQueue(leaderGlobalCheckPoint);
     }
 
     void initiateChunkWorkers() {
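Note on the reworked poll loop above: instead of reading the last seen checkpoint back out of a shared `AtomicLong`, each scheduled iteration now receives it as the `previousGlobalCheckpoint` parameter and passes the updated value into the next `updateChunksQueue` call. Below is a minimal, self-contained sketch of that pattern; the scheduler, the `fetchCheckpoint` stub, and all names are illustrative stand-ins, not the actual Elasticsearch API.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

class PollLoopSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final AtomicLong fakeLeaderCheckpoint = new AtomicLong();
    private volatile boolean running = true;

    // Stand-in for fetchGlobalCheckpoint: invokes the handler with the current value.
    void fetchCheckpoint(LongConsumer handler) {
        handler.accept(fakeLeaderCheckpoint.addAndGet(2)); // pretend the leader advanced
    }

    // Each iteration carries the previously seen checkpoint as a parameter,
    // so no shared field is needed to remember where the last poll left off.
    void poll(long previousCheckpoint) {
        scheduler.schedule(() -> {
            if (!running) {
                return; // stop rescheduling once the coordinator is halted
            }
            fetchCheckpoint(current -> {
                if (current != previousCheckpoint) {
                    System.out.println("new ops in range (" + previousCheckpoint + ", " + current + "]");
                    poll(current);            // next iteration starts from the new checkpoint
                } else {
                    poll(previousCheckpoint); // nothing new; keep polling from the same point
                }
            });
        }, 100, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        PollLoopSketch sketch = new PollLoopSketch();
        sketch.poll(0);
        Thread.sleep(500);
        sketch.running = false;
        sketch.scheduler.shutdown();
    }
}
```

Threading the checkpoint through the recursive call keeps the poller's progress out of shared mutable state, leaving `lastProcessedGlobalCheckpoint` to be owned exclusively by the chunk workers.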
@@ -275,10 +275,6 @@ protected void doRun() throws Exception {
     }
 
     void processNextChunk() {
-        if (stateSupplier.get() == false) {
-            return;
-        }
-
         long[] chunk = chunks.poll();
         if (chunk == null) {
             int activeWorkers = this.activeWorkers.decrementAndGet();
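The removal above drops the per-chunk `stateSupplier` check: since the poll loop now clears `chunks` when the coordinator stops, a worker that finds the queue empty simply retires. Roughly, the drain pattern looks like the following sketch, with invented names and a synchronous loop standing in for the real asynchronous recursion.

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

class WorkerDrainSketch {
    private final Queue<long[]> chunks = new ConcurrentLinkedQueue<>();
    private final AtomicInteger activeWorkers = new AtomicInteger();

    // Start as many workers as needed to reach the desired concurrency.
    void startWorkers(int maxWorkers) {
        int toStart = maxWorkers - activeWorkers.get();
        for (int i = 0; i < toStart; i++) {
            activeWorkers.incrementAndGet();
            new Thread(this::drain).start();
        }
    }

    // Synchronous stand-in for the async processNextChunk() recursion:
    // keep polling until the queue is empty (or was cleared on shutdown).
    void drain() {
        long[] chunk;
        while ((chunk = chunks.poll()) != null) {
            System.out.println("processing [" + chunk[0] + "/" + chunk[1] + "]");
        }
        // Queue drained: this worker retires and the active count drops.
        int remaining = activeWorkers.decrementAndGet();
        System.out.println("worker done, " + remaining + " still active");
    }

    public static void main(String[] args) {
        WorkerDrainSketch sketch = new WorkerDrainSketch();
        for (long i = 0; i < 4; i++) {
            sketch.chunks.add(new long[] { i * 1024, (i + 1) * 1024 - 1 });
        }
        sketch.startWorkers(2);
    }
}
```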
@@ -289,7 +285,7 @@ void processNextChunk() {
         Consumer<Exception> processorHandler = e -> {
             if (e == null) {
                 LOGGER.debug("{} Successfully processed chunk [{}/{}]", leaderShard, chunk[0], chunk[1]);
-                if (lastPolledGlobalCheckpoint.updateAndGet(x -> x < chunk[1] ? chunk[1] : x) == chunk[1]) {
+                if (lastProcessedGlobalCheckpoint.updateAndGet(x -> x < chunk[1] ? chunk[1] : x) == chunk[1]) {
                     processedGlobalCheckpointUpdater.accept(chunk[1]);
                 }
                 processNextChunk();
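Finally, the renamed `lastProcessedGlobalCheckpoint` is advanced with an atomic monotonic-max update: `updateAndGet` raises the stored value only when the completed chunk ends higher, and the caller publishes only when its own completion set the new maximum, so out-of-order chunk completions never move the reported checkpoint backwards. A self-contained sketch of the idiom (names are illustrative):

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

class MonotonicMaxSketch {
    private final AtomicLong lastProcessed = new AtomicLong(-1);

    // Record a completed chunk end; publish only if it advanced the high-water mark.
    void onChunkCompleted(long chunkEnd, LongConsumer publisher) {
        // updateAndGet applies the max function atomically and returns the new value;
        // the comparison tells us whether *this* completion set the new maximum.
        if (lastProcessed.updateAndGet(x -> x < chunkEnd ? chunkEnd : x) == chunkEnd) {
            publisher.accept(chunkEnd);
        }
    }

    public static void main(String[] args) {
        MonotonicMaxSketch sketch = new MonotonicMaxSketch();
        LongConsumer print = v -> System.out.println("published checkpoint " + v);
        sketch.onChunkCompleted(128, print);  // publishes 128
        sketch.onChunkCompleted(64, print);   // stale completion: no publish
        sketch.onChunkCompleted(256, print);  // publishes 256
    }
}
```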