|
7 | 7 | #include <arch/cpu.h> |
8 | 8 | #include <errno.h> |
9 | 9 | #include <stdio.h> |
| 10 | +#include <stdlib.h> |
10 | 11 | #include <sys/__assert.h> |
11 | 12 | #include <sys/stat.h> |
12 | 13 | #include <linker/linker-defs.h> |
@@ -298,17 +299,151 @@ void *_sbrk(intptr_t count) |
298 | 299 | } |
299 | 300 | __weak FUNC_ALIAS(_sbrk, sbrk, void *); |
300 | 301 |
|
301 | | -static LIBC_DATA SYS_MUTEX_DEFINE(heap_mutex); |
| 302 | +#ifdef CONFIG_MULTITHREADING |
| 303 | +/* |
| 304 | + * Newlib Retargetable Locking Interface Implementation |
| 305 | + * |
| 306 | + * When multithreading is enabled, the newlib retargetable locking interface is |
| 307 | + * defined below to override the default void implementation and provide the |
| 308 | + * Zephyr-side locks. |
| 309 | + * |
| 310 | + * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem` |
| 311 | + * because the latter do not support dynamic allocation for now. |
| 312 | + */ |
| 313 | + |
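| | +/* |
| | + * For reference: with _RETARGETABLE_LOCKING enabled, newlib's |
| | + * <sys/lock.h> declares roughly the following interface (a sketch, |
| | + * not a verbatim copy of the header): |
| | + * |
| | + *   struct __lock; |
| | + *   typedef struct __lock *_LOCK_T; |
| | + * |
| | + *   void __retarget_lock_init(_LOCK_T *lock); |
| | + *   void __retarget_lock_acquire(_LOCK_T lock); |
| | + *   int  __retarget_lock_try_acquire(_LOCK_T lock); |
| | + *   void __retarget_lock_release(_LOCK_T lock); |
| | + *   void __retarget_lock_close(_LOCK_T lock); |
| | + * |
| | + * plus a _recursive variant of each. Zephyr's k_mutex supports nested |
| | + * locking by the owning thread, so it backs the recursive variants, |
| | + * while a binary k_sem backs the non-recursive ones. |
| | + */ |
| | + |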
| 314 | +/* Static locks */ |
| 315 | +K_MUTEX_DEFINE(__lock___sinit_recursive_mutex); |
| 316 | +K_MUTEX_DEFINE(__lock___sfp_recursive_mutex); |
| 317 | +K_MUTEX_DEFINE(__lock___atexit_recursive_mutex); |
| 318 | +K_MUTEX_DEFINE(__lock___malloc_recursive_mutex); |
| 319 | +K_MUTEX_DEFINE(__lock___env_recursive_mutex); |
| 320 | +K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1); |
| 321 | +K_SEM_DEFINE(__lock___tz_mutex, 1, 1); |
| 322 | +K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1); |
| 323 | +K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1); |
| 324 | + |
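| | +/* |
| | + * With retargetable locking, newlib routes its internal hooks to these |
| | + * objects; e.g. __malloc_lock()/__malloc_unlock() now acquire and |
| | + * release __lock___malloc_recursive_mutex, which is why the previous |
| | + * heap_mutex and the Zephyr-side __malloc_lock()/__malloc_unlock() |
| | + * implementations are removed below. |
| | + */ |
| | + |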
| 325 | +#ifdef CONFIG_USERSPACE |
| 326 | +/* Grant public access to all static locks after boot */ |
| 327 | +static int newlib_locks_prepare(const struct device *unused) |
| 328 | +{ |
| 329 | + ARG_UNUSED(unused); |
| 330 | + |
| 331 | + /* Grant access to the recursive locks */ |
| 332 | + k_object_access_all_grant(&__lock___sinit_recursive_mutex); |
| 333 | + k_object_access_all_grant(&__lock___sfp_recursive_mutex); |
| 334 | + k_object_access_all_grant(&__lock___atexit_recursive_mutex); |
| 335 | + k_object_access_all_grant(&__lock___malloc_recursive_mutex); |
| 336 | + k_object_access_all_grant(&__lock___env_recursive_mutex); |
| 337 | + |
| 338 | + /* Grant access to the non-recursive locks */ |
| 339 | + k_object_access_all_grant(&__lock___at_quick_exit_mutex); |
| 340 | + k_object_access_all_grant(&__lock___tz_mutex); |
| 341 | + k_object_access_all_grant(&__lock___dd_hash_mutex); |
| 342 | + k_object_access_all_grant(&__lock___arc4random_mutex); |
| 343 | + |
| 344 | + return 0; |
| 345 | +} |
| 346 | + |
| 347 | +SYS_INIT(newlib_locks_prepare, POST_KERNEL, |
| 348 | + CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); |
| 349 | +#endif /* CONFIG_USERSPACE */ |
| 350 | + |
| 351 | +/* Create a new dynamic non-recursive lock */ |
| 352 | +void __retarget_lock_init(_LOCK_T *lock) |
| 353 | +{ |
| 354 | + __ASSERT_NO_MSG(lock != NULL); |
| 355 | + |
| 356 | + /* Allocate semaphore object */ |
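| | + /* |
| | +  * With CONFIG_USERSPACE, kernel objects must come from |
| | +  * k_object_alloc() so the kernel can track permissions on them; |
| | +  * otherwise a plain heap allocation is sufficient. |
| | +  */ |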
| 357 | +#ifndef CONFIG_USERSPACE |
| 358 | + *lock = malloc(sizeof(struct k_sem)); |
| 359 | +#else |
| 360 | + *lock = k_object_alloc(K_OBJ_SEM); |
| 361 | +#endif /* !CONFIG_USERSPACE */ |
| 362 | + __ASSERT(*lock != NULL, "non-recursive lock allocation failed"); |
| 363 | + |
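| | + /* Initial count 1, limit 1: a binary semaphore, initially unlocked */ |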
| 364 | + k_sem_init((struct k_sem *)*lock, 1, 1); |
| 365 | +} |
| 366 | + |
| 367 | +/* Create a new dynamic recursive lock */ |
| 368 | +void __retarget_lock_init_recursive(_LOCK_T *lock) |
| 369 | +{ |
| 370 | + __ASSERT_NO_MSG(lock != NULL); |
| 371 | + |
| 372 | + /* Allocate mutex object */ |
| 373 | +#ifndef CONFIG_USERSPACE |
| 374 | + *lock = malloc(sizeof(struct k_mutex)); |
| 375 | +#else |
| 376 | + *lock = k_object_alloc(K_OBJ_MUTEX); |
| 377 | +#endif /* !CONFIG_USERSPACE */ |
| 378 | + __ASSERT(*lock != NULL, "recursive lock allocation failed"); |
| 379 | + |
| 380 | + k_mutex_init((struct k_mutex *)*lock); |
| 381 | +} |
| 382 | + |
| 383 | +/* Close dynamic non-recursive lock */ |
| 384 | +void __retarget_lock_close(_LOCK_T lock) |
| 385 | +{ |
| 386 | + __ASSERT_NO_MSG(lock != NULL); |
| 387 | +#ifndef CONFIG_USERSPACE |
| 388 | + free(lock); |
| 389 | +#else |
| 390 | + k_object_release(lock); |
| 391 | +#endif /* !CONFIG_USERSPACE */ |
| 392 | +} |
| 393 | + |
| 394 | +/* Close dynamic recursive lock */ |
| 395 | +void __retarget_lock_close_recursive(_LOCK_T lock) |
| 396 | +{ |
| 397 | + __ASSERT_NO_MSG(lock != NULL); |
| 398 | +#ifndef CONFIG_USERSPACE |
| 399 | + free(lock); |
| 400 | +#else |
| 401 | + k_object_release(lock); |
| 402 | +#endif /* !CONFIG_USERSPACE */ |
| 403 | +} |
| 404 | + |
| 405 | +/* Acquire non-recursive lock */ |
| 406 | +void __retarget_lock_acquire(_LOCK_T lock) |
| 407 | +{ |
| 408 | + __ASSERT_NO_MSG(lock != NULL); |
| 409 | + k_sem_take((struct k_sem *)lock, K_FOREVER); |
| 410 | +} |
| 411 | + |
| 412 | +/* Acquire recursive lock */ |
| 413 | +void __retarget_lock_acquire_recursive(_LOCK_T lock) |
| 414 | +{ |
| 415 | + __ASSERT_NO_MSG(lock != NULL); |
| 416 | + k_mutex_lock((struct k_mutex *)lock, K_FOREVER); |
| 417 | +} |
| 418 | + |
| 419 | +/* Try acquiring non-recursive lock */ |
| 420 | +int __retarget_lock_try_acquire(_LOCK_T lock) |
| 421 | +{ |
| 422 | + __ASSERT_NO_MSG(lock != NULL); |
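| | + /* k_sem_take() returns 0 on success; invert to the nonzero-on-success |
| | +  * convention newlib expects from try-acquire. |
| | +  */ |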
| 423 | + return !k_sem_take((struct k_sem *)lock, K_NO_WAIT); |
| 424 | +} |
| 425 | + |
| 426 | +/* Try acquiring recursive lock */ |
| 427 | +int __retarget_lock_try_acquire_recursive(_LOCK_T lock) |
| 428 | +{ |
| 429 | + __ASSERT_NO_MSG(lock != NULL); |
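| | + /* k_mutex_lock() likewise returns 0 on success */ |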
| 430 | + return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT); |
| 431 | +} |
302 | 432 |
|
303 | | -void __malloc_lock(struct _reent *reent) |
| 433 | +/* Release non-recursive lock */ |
| 434 | +void __retarget_lock_release(_LOCK_T lock) |
304 | 435 | { |
305 | | - sys_mutex_lock(&heap_mutex, K_FOREVER); |
| 436 | + __ASSERT_NO_MSG(lock != NULL); |
| 437 | + k_sem_give((struct k_sem *)lock); |
306 | 438 | } |
307 | 439 |
|
308 | | -void __malloc_unlock(struct _reent *reent) |
| 440 | +/* Release recursive lock */ |
| 441 | +void __retarget_lock_release_recursive(_LOCK_T lock) |
309 | 442 | { |
310 | | - sys_mutex_unlock(&heap_mutex); |
| 443 | + __ASSERT_NO_MSG(lock != NULL); |
| 444 | + k_mutex_unlock((struct k_mutex *)lock); |
311 | 445 | } |
| 446 | +#endif /* CONFIG_MULTITHREADING */ |
312 | 447 |
|
313 | 448 | __weak int *__errno(void) |
314 | 449 | { |
|