@@ -28,13 +28,10 @@ struct PerThreadAllocProfile {
     vector<RawAlloc> allocs;
     unordered_map<size_t, size_t> type_address_by_value_address;
     unordered_map<size_t, size_t> frees_by_type_address;
-
-    size_t alloc_counter;
-    size_t last_recorded_alloc;
 };
 
 struct AllocProfile {
-    int skip_every;
+    double sample_rate;
 
     vector<PerThreadAllocProfile> per_thread_profiles;
 };
@@ -69,8 +66,8 @@ RawBacktrace get_raw_backtrace() {
 
 // == exported interface ==
 
-JL_DLLEXPORT void jl_start_alloc_profile(int skip_every) {
-    g_alloc_profile = AllocProfile{skip_every};
+JL_DLLEXPORT void jl_start_alloc_profile(double sample_rate) {
+    g_alloc_profile = AllocProfile{sample_rate};
 
     for (int i = 0; i < jl_n_threads; i++) {
         g_alloc_profile.per_thread_profiles.push_back(PerThreadAllocProfile{});
@@ -131,20 +128,18 @@ JL_DLLEXPORT void jl_free_alloc_profile() {
 
 void _record_allocated_value(jl_value_t *val, size_t size) JL_NOTSAFEPOINT {
     auto& global_profile = g_alloc_profile;
-
     auto& profile = global_profile.per_thread_profiles[jl_threadid()];
 
-    profile.alloc_counter++;
-    auto diff = profile.alloc_counter - profile.last_recorded_alloc;
-    if (diff < g_alloc_profile.skip_every) {
+    auto sample_val = double(rand()) / double(RAND_MAX);
+    auto should_record = sample_val <= global_profile.sample_rate;
+    if (!should_record) {
         return;
     }
-    profile.last_recorded_alloc = profile.alloc_counter;
 
     auto type = (jl_datatype_t*)jl_typeof(val);
-
+    // Used when counting frees. We can't get the type info then,
+    // because it gets corrupted during garbage collection.
     profile.type_address_by_value_address[(size_t)val] = (size_t)type;
-
     profile.allocs.emplace_back(RawAlloc{
         type,
         get_raw_backtrace(),
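
For reference, the new check replaces the "skip every N allocations" counter with a per-allocation Bernoulli trial: a uniform draw from [0, 1] via rand()/RAND_MAX is compared against sample_rate, so a rate of 1.0 records every allocation and rates near 0.0 record almost none. A minimal standalone sketch of the same idea (the should_sample helper name is illustrative and not part of this patch):

#include <cstdlib>

// True with probability roughly `sample_rate`; rates above 1.0 record
// everything, rates at or below 0.0 record (almost) nothing.
// Uses rand() for simplicity, matching the patch, not for statistical quality.
static bool should_sample(double sample_rate) {
    double u = double(rand()) / double(RAND_MAX);
    return u <= sample_rate;
}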