@@ -63,6 +63,7 @@
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.merge.OnGoingMerge;
 import org.elasticsearch.index.seqno.SeqNoStats;
+import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
 import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
 import org.elasticsearch.index.shard.ShardId;
@@ -119,8 +120,6 @@ public class InternalEngine extends Engine {
     private final IndexThrottle throttle;
 
     private final SequenceNumbersService seqNoService;
-    static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint";
-    static final String MAX_SEQ_NO = "max_seq_no";
 
     // How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges
     // are falling behind and when writing indexing buffer to disk is too slow. When this is 0, there is no throttling, else we throttling
@@ -365,7 +364,7 @@ private Translog.TranslogGeneration loadTranslogIdFromCommit(IndexWriter writer)
     private static SeqNoStats loadSeqNoStatsFromLuceneAndTranslog(
             final TranslogConfig translogConfig,
             final IndexWriter indexWriter) throws IOException {
-        long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath());
+        final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath());
         return loadSeqNoStatsFromLucene(globalCheckpoint, indexWriter);
     }
 
@@ -378,20 +377,7 @@ private static SeqNoStats loadSeqNoStatsFromLuceneAndTranslog(
      * @return the sequence number stats
      */
     private static SeqNoStats loadSeqNoStatsFromLucene(final long globalCheckpoint, final IndexWriter indexWriter) {
-        long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
-        long localCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
-        for (Map.Entry<String, String> entry : indexWriter.getLiveCommitData()) {
-            final String key = entry.getKey();
-            if (key.equals(LOCAL_CHECKPOINT_KEY)) {
-                assert localCheckpoint == SequenceNumbersService.NO_OPS_PERFORMED;
-                localCheckpoint = Long.parseLong(entry.getValue());
-            } else if (key.equals(MAX_SEQ_NO)) {
-                assert maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : localCheckpoint;
-                maxSeqNo = Long.parseLong(entry.getValue());
-            }
-        }
-
-        return new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
+        return SequenceNumbers.loadSeqNoStatsFromLuceneCommit(globalCheckpoint, indexWriter.getLiveCommitData());
     }
 
     private SearcherManager createSearcherManager() throws EngineException {
@@ -684,13 +670,20 @@ private IndexResult innerIndex(Index index) throws IOException {
             final IndexResult indexResult;
             if (checkVersionConflictResult.isPresent()) {
                 indexResult = checkVersionConflictResult.get();
+                // norelease: this is not correct as this does not force an fsync, and we need to handle failures including replication
+                if (indexResult.hasFailure()) {
+                    location = null;
+                } else {
+                    final Translog.NoOp operation = new Translog.NoOp(seqNo, index.primaryTerm(), "version conflict during recovery");
+                    location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? translog.add(operation) : null;
+                }
             } else {
                 // no version conflict
                 if (index.origin() == Operation.Origin.PRIMARY) {
                     seqNo = seqNoService().generateSeqNo();
                 }
 
-                /**
+                /*
                  * Update the document's sequence number and primary term; the sequence number here is derived here from either the sequence
                  * number service if this is on the primary, or the existing document's sequence number if this is on the replica. The
                  * primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created.
@@ -707,12 +700,11 @@ private IndexResult innerIndex(Index index) throws IOException {
                     update(index.uid(), index.docs(), indexWriter);
                 }
                 indexResult = new IndexResult(updatedVersion, seqNo, deleted);
-                location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
-                    ? translog.add(new Translog.Index(index, indexResult))
-                    : null;
                 versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion));
-                indexResult.setTranslogLocation(location);
+                final Translog.Index operation = new Translog.Index(index, indexResult);
+                location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? translog.add(operation) : null;
             }
+            indexResult.setTranslogLocation(location);
             indexResult.setTook(System.nanoTime() - index.startTime());
             indexResult.freeze();
             return indexResult;
@@ -816,21 +808,26 @@ private DeleteResult innerDelete(Delete delete) throws IOException {
             final DeleteResult deleteResult;
             if (result.isPresent()) {
                 deleteResult = result.get();
+                // norelease: this is not correct as this does not force an fsync, and we need to handle failures including replication
+                if (deleteResult.hasFailure()) {
+                    location = null;
+                } else {
+                    final Translog.NoOp operation = new Translog.NoOp(seqNo, delete.primaryTerm(), "version conflict during recovery");
+                    location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? translog.add(operation) : null;
+                }
             } else {
                 if (delete.origin() == Operation.Origin.PRIMARY) {
                     seqNo = seqNoService().generateSeqNo();
                 }
-
                 updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
                 found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue);
                 deleteResult = new DeleteResult(updatedVersion, seqNo, found);
-                location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
-                    ? translog.add(new Translog.Delete(delete, deleteResult))
-                    : null;
                 versionMap.putUnderLock(delete.uid().bytes(),
                     new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
-                deleteResult.setTranslogLocation(location);
+                final Translog.Delete operation = new Translog.Delete(delete, deleteResult);
+                location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? translog.add(operation) : null;
             }
+            deleteResult.setTranslogLocation(location);
             deleteResult.setTook(System.nanoTime() - delete.startTime());
             deleteResult.freeze();
             return deleteResult;
@@ -1552,11 +1549,11 @@ private void commitIndexWriter(IndexWriter writer, Translog translog, String syn
             final Map<String, String> commitData = new HashMap<>(6);
             commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGen);
             commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID);
-            commitData.put(LOCAL_CHECKPOINT_KEY, localCheckpoint);
+            commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint);
             if (syncId != null) {
                 commitData.put(Engine.SYNC_COMMIT_ID, syncId);
             }
-            commitData.put(MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo()));
+            commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo()));
             if (logger.isTraceEnabled()) {
                 logger.trace("committing writer with commit data [{}]", commitData);
             }
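
For reference, the new `SequenceNumbers.loadSeqNoStatsFromLuceneCommit` call presumably carries the logic that this diff removes from `InternalEngine`. Below is a minimal sketch of what that helper likely looks like, reconstructed from the deleted lines; the class body, javadoc, and exact signature are assumptions, not the actual `org.elasticsearch.index.seqno.SequenceNumbers` source.

```java
// Hypothetical reconstruction of the shared helper, assembled from the code removed above.
package org.elasticsearch.index.seqno;

import java.util.Map;

public class SequenceNumbers {

    public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint";
    public static final String MAX_SEQ_NO = "max_seq_no";

    /**
     * Reads the max sequence number and local checkpoint from the Lucene commit user data
     * (as returned by IndexWriter#getLiveCommitData) and combines them with the global
     * checkpoint read from the translog.
     */
    public static SeqNoStats loadSeqNoStatsFromLuceneCommit(
            final long globalCheckpoint,
            final Iterable<Map.Entry<String, String>> commitData) {
        long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
        long localCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
        for (final Map.Entry<String, String> entry : commitData) {
            final String key = entry.getKey();
            if (key.equals(LOCAL_CHECKPOINT_KEY)) {
                assert localCheckpoint == SequenceNumbersService.NO_OPS_PERFORMED;
                localCheckpoint = Long.parseLong(entry.getValue());
            } else if (key.equals(MAX_SEQ_NO)) {
                assert maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : localCheckpoint;
                maxSeqNo = Long.parseLong(entry.getValue());
            }
        }
        return new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
    }
}
```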