Skip to content

Commit dd3cad3

Browse files
committed
lib: newlib: Add retargetable locking implementation
This commit adds the newlib retargetable locking interface function implementations in order to make newlib functions thread safe. The newlib retargetable locking interface is internally called by the standard C library functions provided by newlib to synchronise access to the internal shared resources. By default, the retargetable locking interface functions defined within the newlib library are no-op. When multi-threading is enabled (i.e. `CONFIG_MULTITHREADING=y`), the Zephyr-side retargetable locking interface implementations override the default newlib implementation and provide locking mechanism. The retargetable locking interface may be called with either a static (`__lock__...`) or a dynamic lock. The static locks are statically allocated and initialised immediately after kernel initialisation by `newlib_locks_prepare`. The dynamic locks are allocated and de-allocated through the `__retargetable_lock_init[_recursive]` and `__retarget_lock_close_[recurisve]` functions as necessary by the newlib functions. These locks are allocated in the newlib heap using the `malloc` function when userspace is not enabled -- this is safe because the internal multi-threaded malloc lock implementations (`__malloc_lock` and `__malloc_unlock`) call the retargetable locking interface with a static lock (`__lock__malloc_recursive_mutex`). When userspace is enabled, the dynamic locks are allocated and freed through `k_object_alloc` and `k_object_release`. Note that the lock implementations used here are `k_mutex` and `k_sem` instead of `sys_mutex` and `sys_sem` because the Zephyr kernel does not currently support dynamic allocation of the latter. These locks should be updated to use `sys_mutex` and `sys_sem` when the Zephyr becomes capable of dynamically allocating them in the future. Signed-off-by: Stephanos Ioannidis <[email protected]>
1 parent 8566bfe commit dd3cad3

File tree

1 file changed

+140
-5
lines changed

1 file changed

+140
-5
lines changed

lib/libc/newlib/libc-hooks.c

Lines changed: 140 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
#include <arch/cpu.h>
88
#include <errno.h>
99
#include <stdio.h>
10+
#include <malloc.h>
1011
#include <sys/__assert.h>
1112
#include <sys/stat.h>
1213
#include <linker/linker-defs.h>
@@ -298,17 +299,151 @@ void *_sbrk(intptr_t count)
298299
}
299300
__weak FUNC_ALIAS(_sbrk, sbrk, void *);
300301

301-
static LIBC_DATA SYS_MUTEX_DEFINE(heap_mutex);
302+
#ifdef CONFIG_MULTITHREADING
303+
/*
304+
* Newlib Retargetable Locking Interface Implementation
305+
*
306+
* When multithreading is enabled, the newlib retargetable locking interface is
307+
* defined below to override the default void implementation and provide the
308+
* Zephyr-side locks.
309+
*
310+
* NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
311+
* because the latter do not support dynamic allocation for now.
312+
*/
313+
314+
/* Static locks
 *
 * These symbols override newlib's default no-op static lock objects
 * (see newlib's <sys/lock.h>). The recursive locks are backed by
 * k_mutex, which supports nested locking; the non-recursive locks are
 * binary semaphores (initial count 1, limit 1).
 */
K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
K_MUTEX_DEFINE(__lock___env_recursive_mutex);
K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);
324+
325+
#ifdef CONFIG_USERSPACE
/* Grant public access to all static locks after boot */
static int newlib_locks_prepare(const struct device *unused)
{
	/* Every statically-defined newlib lock, recursive and
	 * non-recursive alike, must be accessible from user mode.
	 */
	void *const static_locks[] = {
		&__lock___sinit_recursive_mutex,
		&__lock___sfp_recursive_mutex,
		&__lock___atexit_recursive_mutex,
		&__lock___malloc_recursive_mutex,
		&__lock___env_recursive_mutex,
		&__lock___at_quick_exit_mutex,
		&__lock___tz_mutex,
		&__lock___dd_hash_mutex,
		&__lock___arc4random_mutex,
	};

	ARG_UNUSED(unused);

	for (size_t i = 0; i < ARRAY_SIZE(static_locks); i++) {
		k_object_access_all_grant(static_locks[i]);
	}

	return 0;
}

SYS_INIT(newlib_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */
350+
351+
/* Create a new dynamic non-recursive lock */
352+
void __retarget_lock_init(_LOCK_T *lock)
353+
{
354+
__ASSERT_NO_MSG(lock != NULL);
355+
356+
/* Allocate semaphore object */
357+
#ifndef CONFIG_USERSPACE
358+
*lock = malloc(sizeof(struct k_sem));
359+
#else
360+
*lock = k_object_alloc(K_OBJ_SEM);
361+
#endif /* !CONFIG_USERSPACE */
362+
__ASSERT(*lock != NULL, "non-recursive lock allocation failed");
363+
364+
k_sem_init((struct k_sem *)*lock, 1, 1);
365+
}
366+
367+
/* Create a new dynamic recursive lock */
368+
void __retarget_lock_init_recursive(_LOCK_T *lock)
369+
{
370+
__ASSERT_NO_MSG(lock != NULL);
371+
372+
/* Allocate mutex object */
373+
#ifndef CONFIG_USERSPACE
374+
*lock = malloc(sizeof(struct k_mutex));
375+
#else
376+
*lock = k_object_alloc(K_OBJ_MUTEX);
377+
#endif /* !CONFIG_USERSPACE */
378+
__ASSERT(*lock != NULL, "recursive lock allocation failed");
379+
380+
k_mutex_init((struct k_mutex *)*lock);
381+
}
382+
383+
/* Close dynamic non-recursive lock */
384+
void __retarget_lock_close(_LOCK_T lock)
385+
{
386+
__ASSERT_NO_MSG(lock != NULL);
387+
#ifndef CONFIG_USERSPACE
388+
free(lock);
389+
#else
390+
k_object_release(lock);
391+
#endif /* !CONFIG_USERSPACE */
392+
}
393+
394+
/* Close dynamic recursive lock */
395+
void __retarget_lock_close_recursive(_LOCK_T lock)
396+
{
397+
__ASSERT_NO_MSG(lock != NULL);
398+
#ifndef CONFIG_USERSPACE
399+
free(lock);
400+
#else
401+
k_object_release(lock);
402+
#endif /* !CONFIG_USERSPACE */
403+
}
404+
405+
/* Acquiure non-recursive lock */
406+
void __retarget_lock_acquire(_LOCK_T lock)
407+
{
408+
__ASSERT_NO_MSG(lock != NULL);
409+
k_sem_take((struct k_sem *)lock, K_FOREVER);
410+
}
411+
412+
/* Acquiure recursive lock */
413+
void __retarget_lock_acquire_recursive(_LOCK_T lock)
414+
{
415+
__ASSERT_NO_MSG(lock != NULL);
416+
k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
417+
}
418+
419+
/* Try acquiring non-recursive lock */
420+
int __retarget_lock_try_acquire(_LOCK_T lock)
421+
{
422+
__ASSERT_NO_MSG(lock != NULL);
423+
return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
424+
}
425+
426+
/* Try acquiring recursive lock */
427+
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
428+
{
429+
__ASSERT_NO_MSG(lock != NULL);
430+
return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
431+
}
302432

303-
void __malloc_lock(struct _reent *reent)
433+
/* Release non-recursive lock */
434+
void __retarget_lock_release(_LOCK_T lock)
304435
{
305-
sys_mutex_lock(&heap_mutex, K_FOREVER);
436+
__ASSERT_NO_MSG(lock != NULL);
437+
k_sem_give((struct k_sem *)lock);
306438
}
307439

308-
void __malloc_unlock(struct _reent *reent)
440+
/* Release recursive lock */
441+
void __retarget_lock_release_recursive(_LOCK_T lock)
309442
{
310-
sys_mutex_unlock(&heap_mutex);
443+
__ASSERT_NO_MSG(lock != NULL);
444+
k_mutex_unlock((struct k_mutex *)lock);
311445
}
446+
#endif /* CONFIG_MULTITHREADING */
312447

313448
__weak int *__errno(void)
314449
{

0 commit comments

Comments
 (0)