@@ -203,7 +203,14 @@ pub trait Plan: 'static + Sync + Downcast {
     /// This is invoked once per GC by one worker thread. 'tls' is the worker thread that executes this method.
     fn release(&mut self, tls: VMWorkerThread);

-    fn poll(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
+    /// This method is called periodically by the allocation subsystem
+    /// (by default, each time a page is consumed), and provides the
+    /// collector with an opportunity to collect.
+    ///
+    /// Arguments:
+    /// * `space_full`: Space request failed, must recover pages within 'space'.
+    /// * `space`: The space that triggered the poll. This could be `None` if the poll is not triggered by a space.
+    fn poll(&self, space_full: bool, space: Option<&dyn Space<Self::VM>>) -> bool {
         if self.collection_required(space_full, space) {
             // FIXME
             /*if space == META_DATA_SPACE {
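
As a quick illustration of the new signature (a sketch only; the two helper functions and the import paths below are assumptions, not part of this change): a space-triggered poll passes itself as `Some(space)`, while a trigger that has no associated space passes `None`.

```rust
// Sketch only: illustrative helpers showing the two ways `poll` can now be called.
// `Plan`, `Space` and `VMBinding` are mmtk-core's traits; the import paths are assumed.
use mmtk::plan::Plan;
use mmtk::policy::space::Space;
use mmtk::vm::VMBinding;

/// A space-triggered poll attaches the space that issued the request.
fn poll_from_space<VM: VMBinding>(
    plan: &dyn Plan<VM = VM>,
    space: &dyn Space<VM>,
    space_full: bool,
) -> bool {
    plan.poll(space_full, Some(space))
}

/// A trigger with no associated space (e.g. allocation bookkeeping) passes `None`.
fn poll_without_space<VM: VMBinding>(plan: &dyn Plan<VM = VM>) -> bool {
    plan.poll(false, None)
}
```
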
@@ -236,8 +243,12 @@ pub trait Plan: 'static + Sync + Downcast {
         false
     }

-    fn log_poll(&self, space: &dyn Space<Self::VM>, message: &'static str) {
-        info!("[POLL] {}: {}", space.get_name(), message);
+    fn log_poll(&self, space: Option<&dyn Space<Self::VM>>, message: &'static str) {
+        if let Some(space) = space {
+            info!("[POLL] {}: {}", space.get_name(), message);
+        } else {
+            info!("[POLL] {}", message);
+        }
     }

     /**
@@ -248,7 +259,7 @@ pub trait Plan: 'static + Sync + Downcast {
      * @param space TODO
      * @return <code>true</code> if a collection is requested by the plan.
      */
-    fn collection_required(&self, space_full: bool, _space: &dyn Space<Self::VM>) -> bool;
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool;

     // Note: The following methods are about page accounting. The default implementation should
     // work fine for non-copying plans. For copying plans, the plan should override any of these methods
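
For a plan with no space-specific trigger logic, the updated trait method can simply ignore the space and delegate to the `BasePlan` helper whose signature is adjusted later in this diff. A minimal sketch, where `MyPlan` and its `base()` accessor are hypothetical stand-ins for a concrete plan:

```rust
// Sketch only: `MyPlan` and `base()` are hypothetical; only the delegation pattern matters.
impl<VM: VMBinding> Plan for MyPlan<VM> {
    type VM = VM;

    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool {
        // Ignore the triggering space; stress-GC and heap-exhaustion checks live in BasePlan.
        self.base().collection_required(self, space_full)
    }

    // ... remaining Plan methods elided ...
}
```
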
@@ -372,9 +383,12 @@ pub struct BasePlan<VM: VMBinding> {
     /// Have we scanned all the stacks?
     stacks_prepared: AtomicBool,
     pub mutator_iterator_lock: Mutex<()>,
-    // A counter that keeps tracks of the number of bytes allocated since last stress test
-    pub allocation_bytes: AtomicUsize,
-    // Wrapper around analysis counters
+    /// A counter that keeps track of the number of bytes allocated since the last stress test
+    allocation_bytes: AtomicUsize,
+    /// A counter that keeps track of the number of bytes allocated by malloc
+    #[cfg(feature = "malloc_counted_size")]
+    malloc_bytes: AtomicUsize,
+    /// Wrapper around analysis counters
     #[cfg(feature = "analysis")]
     pub analysis_manager: AnalysisManager<VM>,

@@ -518,6 +532,8 @@ impl<VM: VMBinding> BasePlan<VM> {
             scanned_stacks: AtomicUsize::new(0),
             mutator_iterator_lock: Mutex::new(()),
             allocation_bytes: AtomicUsize::new(0),
+            #[cfg(feature = "malloc_counted_size")]
+            malloc_bytes: AtomicUsize::new(0),
             #[cfg(feature = "analysis")]
             analysis_manager,
         }
@@ -596,6 +612,14 @@ impl<VM: VMBinding> BasePlan<VM> {
             pages += self.ro_space.reserved_pages();
         }

+        // If we need to count malloc'd size as part of our heap, we add it here.
+        #[cfg(feature = "malloc_counted_size")]
+        {
+            pages += crate::util::conversions::bytes_to_pages_up(
+                self.malloc_bytes.load(Ordering::SeqCst),
+            );
+        }
+
         // The VM space may be used as an immutable boot image, in which case, we should not count
         // it as part of the heap size.
         pages
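
The conversion above just rounds the malloc'd byte total up to whole pages, so any non-zero malloc footprint reserves at least one page. A minimal sketch of that rounding, assuming 4 KiB pages (the helper below is illustrative, not the crate's own `bytes_to_pages_up`):

```rust
// Illustrative rounding only, assuming a 4 KiB page size.
const BYTES_IN_PAGE: usize = 4096;

fn bytes_to_pages_up(bytes: usize) -> usize {
    (bytes + BYTES_IN_PAGE - 1) / BYTES_IN_PAGE
}

fn main() {
    assert_eq!(bytes_to_pages_up(0), 0);
    assert_eq!(bytes_to_pages_up(1), 1); // even one malloc'd byte reserves a full page
    assert_eq!(bytes_to_pages_up(4096), 1);
    assert_eq!(bytes_to_pages_up(4097), 2);
}
```
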
@@ -794,12 +818,7 @@ impl<VM: VMBinding> BasePlan<VM> {
             && (self.allocation_bytes.load(Ordering::SeqCst) > *self.options.stress_factor)
     }

-    pub(super) fn collection_required<P: Plan>(
-        &self,
-        plan: &P,
-        space_full: bool,
-        _space: &dyn Space<VM>,
-    ) -> bool {
+    pub(super) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
         let stress_force_gc = self.should_do_stress_gc();
         if stress_force_gc {
             debug!(
@@ -838,6 +857,19 @@ impl<VM: VMBinding> BasePlan<VM> {
         self.vm_space
             .verify_side_metadata_sanity(side_metadata_sanity_checker);
     }
+
+    #[cfg(feature = "malloc_counted_size")]
+    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
+        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
+    }
+    #[cfg(feature = "malloc_counted_size")]
+    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
+        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
+    }
+    #[cfg(feature = "malloc_counted_size")]
+    pub fn get_malloc_bytes(&self) -> usize {
+        self.malloc_bytes.load(Ordering::SeqCst)
+    }
 }

 /**
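
Taken together, the `malloc_counted_size` additions let crate-internal code keep the plan's malloc accounting in sync with raw allocations, while the public `get_malloc_bytes` accessor exposes the running total. A sketch of one possible wrapper; the function names are hypothetical, `libc` is assumed as the underlying allocator, and the increase/decrease methods are `pub(crate)`, so this pattern only applies inside mmtk-core:

```rust
// Sketch only: hypothetical crate-internal wrappers keeping malloc_bytes in sync.
// The import paths are assumed.
use crate::plan::global::BasePlan;
use crate::vm::VMBinding;
use libc::{c_void, free, malloc};

fn counted_malloc<VM: VMBinding>(base: &BasePlan<VM>, size: usize) -> *mut c_void {
    let ptr = unsafe { malloc(size) };
    if !ptr.is_null() {
        // Recorded bytes feed into reserved-page accounting (see the earlier hunk).
        base.increase_malloc_bytes_by(size);
    }
    ptr
}

fn counted_free<VM: VMBinding>(base: &BasePlan<VM>, ptr: *mut c_void, size: usize) {
    unsafe { free(ptr) };
    base.decrease_malloc_bytes_by(size);
}
```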