@@ -19,6 +19,31 @@ extern "C" {
 #define MIN_BLOCK_PG_ALLOC (1) // 16 KB
 
 static int block_pg_cnt = DEFAULT_BLOCK_PG_ALLOC;
+static _Atomic(size_t) current_pg_count = 0;
+
+// Julia allocates large blocks (64M) with mmap. These are never
+// released back to the OS, but the underlying physical memory may be
+// released with calls to madvise(MADV_DONTNEED).
+// These large blocks are used to allocate jl_page_size-sized pages,
+// which are tracked by current_pg_count.
+static uint64_t poolmem_bytes_allocated = 0;
+static uint64_t poolmem_blocks_allocated_total = 0;
+
+
+JL_DLLEXPORT uint64_t jl_poolmem_blocks_allocated_total(void)
+{
+    return poolmem_blocks_allocated_total;
+}
+
+JL_DLLEXPORT uint64_t jl_poolmem_bytes_allocated(void)
+{
+    return poolmem_bytes_allocated;
+}
+
+JL_DLLEXPORT uint64_t jl_current_pg_count(void)
+{
+    return (uint64_t)jl_atomic_load(&current_pg_count);
+}
 
 void jl_gc_init_page(void)
 {
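For context, a minimal sketch (not part of the diff) of how an embedder might read the three new counters from C. The extern declarations are copied by hand here on the assumption that they do not appear in the public headers:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    // Exported by libjulia via the JL_DLLEXPORT functions above.
    extern uint64_t jl_poolmem_bytes_allocated(void);
    extern uint64_t jl_poolmem_blocks_allocated_total(void);
    extern uint64_t jl_current_pg_count(void);

    void print_gc_pool_stats(void)
    {
        // The first two counters only grow (blocks are never unmapped);
        // the page count rises and falls as pages are allocated and freed.
        printf("blocks mmap'd: %" PRIu64 "\n", jl_poolmem_blocks_allocated_total());
        printf("bytes mmap'd:  %" PRIu64 "\n", jl_poolmem_bytes_allocated());
        printf("live pages:    %" PRIu64 "\n", jl_current_pg_count());
    }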
@@ -47,6 +72,8 @@ char *jl_gc_try_alloc_pages_(int pg_cnt) JL_NOTSAFEPOINT
                             MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (mem == MAP_FAILED)
         return NULL;
+    poolmem_bytes_allocated += pages_sz;
+    poolmem_blocks_allocated_total++;
 #endif
     if (GC_PAGE_SZ > jl_page_size)
         // round data pointer up to the nearest gc_page_data-aligned
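To make the accounting concrete, here is a hedged sketch of the surrounding allocation path (simplified; pages_sz and the mmap flags follow the context lines above, error handling and the Windows branch are omitted):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static uint64_t poolmem_bytes_allocated = 0;
    static uint64_t poolmem_blocks_allocated_total = 0;

    static char *try_alloc_block(size_t pages_sz)
    {
        // MAP_NORESERVE reserves address space without committing
        // physical memory; pages are committed lazily on first touch.
        char *mem = (char*)mmap(0, pages_sz, PROT_READ | PROT_WRITE,
                                MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED)
            return NULL;
        // Blocks are never munmap'd, so both counters are bumped once,
        // here, and only ever increase.
        poolmem_bytes_allocated += pages_sz;
        poolmem_blocks_allocated_total++;
        return mem;
    }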
@@ -148,6 +175,7 @@ NOINLINE jl_gc_pagemeta_t *jl_gc_alloc_page(void) JL_NOTSAFEPOINT
     SetLastError(last_error);
 #endif
     errno = last_errno;
+    jl_atomic_fetch_add(&current_pg_count, 1);
     return meta;
 }
 
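This increment pairs with a decrement in jl_gc_free_page below, so current_pg_count always reflects the number of live pages. A minimal sketch of that pairing in plain C11 atomics (assuming jl_atomic_fetch_add maps onto atomic_fetch_add):

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic(size_t) current_pg_count = 0;

    static void on_page_alloc(void) {   // jl_gc_alloc_page path
        atomic_fetch_add(&current_pg_count, 1);
    }

    static void on_page_free(void) {    // jl_gc_free_page path
        // Adding -1 to an unsigned atomic decrements it; unsigned
        // wraparound is well defined in C.
        atomic_fetch_add(&current_pg_count, (size_t)-1);
    }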
@@ -188,6 +216,7 @@ void jl_gc_free_page(jl_gc_pagemeta_t *pg) JL_NOTSAFEPOINT
     madvise(p, decommit_size, MADV_DONTNEED);
 #endif
     msan_unpoison(p, decommit_size);
+    jl_atomic_fetch_add(&current_pg_count, -1);
 }
 
 #ifdef __cplusplus
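Note that the free path never unmaps; it only decommits, which is why poolmem_bytes_allocated has no matching subtraction. A hedged sketch of that pattern (names are illustrative, not the actual jl_gc_free_page signature):

    #include <stddef.h>
    #include <sys/mman.h>

    static void decommit_page(void *p, size_t decommit_size)
    {
        // The mapping stays valid, but the kernel may reclaim the
        // physical memory behind it; re-touching the range later
        // commits fresh zero-filled pages.
        madvise(p, decommit_size, MADV_DONTNEED);
    }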