From 7778c2ad00f17be122455cfd9c997f7acdbb74a5 Mon Sep 17 00:00:00 2001
From: Max Horn
Date: Tue, 28 May 2024 09:28:49 +0200
Subject: [PATCH] Don't expose guard pages to malloc_stack API consumers

Whether or not a guard page is in effect is an implementation detail, and
consumers of the `malloc_stack` API should not have to worry about it.
In particular, if a stack of a certain size is requested, a stack of that
size should be delivered, not one that is reduced on some systems because
we park a guard page in that range.

This also helps consumers of the gcext API implementing stack scanning
(i.e., GAP.jl), as they no longer have to worry about running into those
guard pages.
---
 src/gc-stacks.c | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/src/gc-stacks.c b/src/gc-stacks.c
index 0624c5fa66b54..7882cef73bdb6 100644
--- a/src/gc-stacks.c
+++ b/src/gc-stacks.c
@@ -22,13 +22,22 @@
 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
+#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
+#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
+#else
+const size_t jl_guard_size = 0;
+#endif
+
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -39,6 +48,7 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
@@ -47,6 +57,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
@@ -72,16 +88,22 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 # else
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+#endif
+
     void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
 
-#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
+#ifdef JL_USE_GUARD_PAGE
     // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 #endif
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
@@ -91,6 +113,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
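
Note: the patch over-allocates each stack by one page-aligned guard region,
protects that region, and returns the address just past it, so callers always
see the full bufsz they requested; free_stack undoes the offset before
unmapping. What follows is a minimal standalone sketch of that idea, not
Julia's actual code: POSIX-only, with hypothetical demo_* names and a
DEMO_GUARD_SIZE constant standing in for jl_guard_size.

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#define DEMO_GUARD_SIZE ((size_t)(4096 * 8))

/* Round x up to the next multiple of the system page size. */
static size_t demo_page_align(size_t x)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    return (x + page - 1) & ~(page - 1);
}

/* Allocate a stack with exactly bufsz usable bytes, plus a hidden
 * PROT_NONE guard region mapped below it (stacks grow downward). */
static void *demo_alloc_stack(size_t bufsz)
{
    size_t guard = demo_page_align(DEMO_GUARD_SIZE);
    void *base = mmap(NULL, bufsz + guard, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return NULL;
    /* Make the low pages inaccessible so an overflow faults immediately. */
    if (mprotect(base, guard, PROT_NONE) == -1) {
        munmap(base, bufsz + guard);
        return NULL;
    }
    /* Hand back only the usable region; the guard stays hidden below it. */
    return (char *)base + guard;
}

/* Undo the offset applied at allocation time before unmapping. */
static void demo_free_stack(void *stkbuf, size_t bufsz)
{
    size_t guard = demo_page_align(DEMO_GUARD_SIZE);
    munmap((char *)stkbuf - guard, bufsz + guard);
}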