@@ -811,9 +811,30 @@ update_major_slice_work(intnat howmuch,
                    extra_work);
 
   new_work = max3(alloc_work, dependent_work, extra_work);
+  atomic_fetch_add(&alloc_counter, new_work);
+
   atomic_fetch_add(&work_counter, dom_st->major_work_done_between_slices);
   dom_st->major_work_done_between_slices = 0;
-  atomic_fetch_add(&alloc_counter, new_work);
+
+  /* If the work_counter is falling far behind the alloc_counter,
+   * artificially catch up some of the difference. This is a band-aid
+   * for general GC pacing problems revealed by the mark-delay changes
+   * (PR #13580). */
+  int64_t pending = diffmod(atomic_load(&alloc_counter),
+                            atomic_load(&work_counter));
+  if (pending > (int64_t)total_cycle_work * 2) {
+    intnat catchup = pending - total_cycle_work;
+    CAML_GC_MESSAGE(SLICESIZE,
+                    "work counter %" ARCH_INTNAT_PRINTF_FORMAT "u falling behind "
+                    "alloc counter %" ARCH_INTNAT_PRINTF_FORMAT "u by more than "
+                    "twice a total cycle's work %" ARCH_INTNAT_PRINTF_FORMAT "d; "
+                    "catching up by %" ARCH_INTNAT_PRINTF_FORMAT "d\n",
+                    atomic_load(&work_counter),
+                    atomic_load(&alloc_counter),
+                    total_cycle_work, catchup);
+    atomic_fetch_add(&work_counter, catchup);
+  }
+
   if (howmuch == AUTO_TRIGGERED_MAJOR_SLICE ||
       howmuch == GC_CALCULATE_MAJOR_SLICE) {
     dom_st->slice_target = atomic_load(&alloc_counter);
@@ -1874,7 +1895,7 @@ static void major_collection_slice(intnat howmuch,
     if (log_events) CAML_EV_END(EV_MAJOR_SWEEP);
   }
 
-  if (domain_state->sweeping_done) {
+  if (domain_state->sweeping_done && !caml_marking_started()) {
     /* We do not immediately trigger a minor GC, but instead wait for
      * the next one to happen normally. This gives some chance that
      * other domains will finish sweeping as well.
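The catch-up heuristic added in the first hunk can be read in isolation. What follows is a minimal, self-contained sketch of that rule, not the runtime code itself: it assumes diffmod computes a wraparound-safe signed difference between the two counters (its real definition is not shown in this diff), uses plain uint64_t in place of the runtime's atomic uintnat counters, and the maybe_catch_up helper and the driver loop are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of diffmod: the signed distance from b to a, computed
 * so that it stays correct when the unsigned counters wrap around. */
static int64_t diffmod(uint64_t a, uint64_t b)
{
  return (int64_t)(a - b);
}

/* Hypothetical standalone model of the pacing counters: alloc_counter is
 * credited with the work each slice is asked to do, work_counter with the
 * work actually performed. */
static uint64_t alloc_counter = 0;
static uint64_t work_counter = 0;

/* Catch-up rule from the patch: if the work counter has fallen more than
 * two full cycles behind the alloc counter, advance it so that the backlog
 * shrinks back to one cycle's worth of work. */
static void maybe_catch_up(int64_t total_cycle_work)
{
  int64_t pending = diffmod(alloc_counter, work_counter);
  if (pending > total_cycle_work * 2) {
    int64_t catchup = pending - total_cycle_work;
    printf("pending %lld > 2 * %lld: catching up by %lld\n",
           (long long)pending, (long long)total_cycle_work,
           (long long)catchup);
    work_counter += (uint64_t)catchup;
  }
}

int main(void)
{
  const int64_t total_cycle_work = 1000;

  /* Simulate slices that are credited with more work than they deliver,
   * so the backlog grows until the catch-up rule fires. */
  for (int slice = 0; slice < 10; slice++) {
    alloc_counter += 400;   /* work the slice is asked to do        */
    work_counter  += 100;   /* work it actually manages to complete */
    maybe_catch_up(total_cycle_work);
  }
  printf("final backlog: %lld\n",
         (long long)diffmod(alloc_counter, work_counter));
  return 0;
}

Note that the catch-up only trims the backlog down to one cycle's worth of work rather than to zero, which matches the comment's "catch up some of the difference": a domain that is only mildly behind is left to catch up through ordinary slices.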