arch/arm/core/Kconfig (1 addition, 0 deletions)

@@ -23,6 +23,7 @@ config CPU_CORTEX_M
 	select ARCH_HAS_STACK_PROTECTION if ARM_MPU || CPU_CORTEX_M_HAS_SPLIM
 	select ARCH_HAS_USERSPACE if ARM_MPU
 	select ARCH_HAS_NOCACHE_MEMORY_SUPPORT if ARM_MPU && CPU_HAS_ARM_MPU && CPU_CORTEX_M7
+	select SWAP_NONATOMIC
 	help
 	  This option signifies the use of a CPU of the Cortex-M family.

kernel/Kconfig (12 additions, 0 deletions)

@@ -483,6 +483,18 @@ config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 	  the _main() thread, but instead must do something custom. It must
 	  enable this option in that case.
 
+config SWAP_NONATOMIC
+	bool
+	help
+	  On some architectures, the _Swap() primitive cannot be made
+	  atomic with respect to the irq_lock being released. That
+	  is, interrupts may be received between the entry to _Swap
+	  and the completion of the context switch. There are a
+	  handful of workaround cases in the kernel that need to be
+	  enabled when this is true. Currently, this only happens on
+	  ARM when the PendSV exception priority sits below that of
+	  Zephyr-handled interrupts.
+
 config ARCH_HAS_CUSTOM_BUSY_WAIT
 	bool
 	# hidden
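
A sketch of where this window opens on ARM, using the pre-rename _Swap()/irq_lock() API this tree uses; the function example_block and its parameters are illustrative, not part of the patch:

/* Illustrative only: where the SWAP_NONATOMIC window opens on ARM. */
static void example_block(_wait_q_t *wait_q, s32_t timeout)
{
	u32_t key = irq_lock();

	/* ...queue _current on wait_q with the given timeout... */

	/* On Cortex-M, _Swap() pends the PendSV exception and releases
	 * the irq_lock; the context switch itself only happens once
	 * PendSV runs.  With PendSV prioritized below Zephyr-handled
	 * interrupts, an ISR (e.g. the system timer) can fire between
	 * the entry to _Swap() and the completion of the switch, and
	 * observe a _current that has already pended itself.
	 */
	(void)_Swap(key);
}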
kernel/sched.c (32 additions, 1 deletion)

@@ -121,7 +121,17 @@ static bool should_preempt(struct k_thread *th, int preempt_ok)
 	}
 
 	/* Or if we're pended/suspended/dummy (duh) */
-	if (!_current || !_is_thread_ready(_current)) {
+	if (!_current || _is_thread_prevented_from_running(_current)) {
 		return true;
 	}
 
+	/* Edge case on ARM where a thread can be pended out of an
+	 * interrupt handler before the "synchronous" swap starts
+	 * context switching. Platforms with atomic swap can never
+	 * hit this.
+	 */
+	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
+	    && _is_thread_timeout_active(th)) {
+		return true;
+	}
+
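
A side note on the IS_ENABLED() idiom in the new check: unlike a bare #ifdef, it keeps the branch visible to the compiler on every configuration and lets it be discarded as dead code when the option is off. The helper below is hypothetical, shown only to contrast the two forms:

/* Contrast sketch (hypothetical helper): the same check written with
 * #ifdef.  The IS_ENABLED() form used in the patch is type-checked on
 * all configurations and compiles to nothing when
 * CONFIG_SWAP_NONATOMIC is not selected.
 */
static bool swap_nonatomic_edge_case(struct k_thread *th)
{
#ifdef CONFIG_SWAP_NONATOMIC
	if (_is_thread_timeout_active(th)) {
		return true;
	}
#endif
	return false;
}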
@@ -207,6 +217,15 @@ static struct k_thread *next_up(void)
 static int slice_time;
 static int slice_max_prio;
 
+#ifdef CONFIG_SWAP_NONATOMIC
+/* If _Swap() isn't atomic, then it's possible for a timer interrupt
+ * to try to timeslice away _current after it has already pended
+ * itself but before the corresponding context switch. Treat that as
+ * a noop condition in z_time_slice().
+ */
+static struct k_thread *pending_current;
+#endif
+
 static void reset_time_slice(void)
 {
 	/* Add the elapsed time since the last announced tick to the
@@ -239,6 +258,15 @@ static inline int sliceable(struct k_thread *t)
 /* Called out of each timer interrupt */
 void z_time_slice(int ticks)
 {
+#ifdef CONFIG_SWAP_NONATOMIC
+	if (pending_current == _current) {
+		pending_current = NULL;
+		reset_time_slice();
+		return;
+	}
+	pending_current = NULL;
+#endif
+
 	if (slice_time && sliceable(_current)) {
 		if (ticks >= _current_cpu->slice_ticks) {
 			_move_thread_to_end_of_prio_q(_current);
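
The two sides of the pending_current handshake (set in _pend_current_thread() below, consumed here) line up as follows; a sketch assuming the nonatomic ARM swap:

/* Sketch of the handshake, assuming the nonatomic ARM swap:
 *
 *   Thread A (about to block)        Timer ISR
 *   -------------------------        ---------
 *   _pend_current_thread():
 *     pending_current = _current;
 *     pend(...);
 *     _Swap(key);  // pends PendSV,
 *                  // releases irqs
 *                                    z_time_slice(ticks):
 *                                      pending_current == _current,
 *                                      so reset the slice and return
 *                                      rather than timeslicing away a
 *                                      thread that has already pended
 *                                      itself
 *   ...PendSV runs, switch completes;
 *   later ticks see pending_current != _current,
 *   clear it, and slice normally.
 */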
@@ -379,6 +407,9 @@ void z_thread_timeout(struct _timeout *to)
 
 int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 {
+#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
+	pending_current = _current;
+#endif
 	pend(_current, wait_q, timeout);
 	return _Swap(key);
 }
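
For context, a hypothetical caller modeled on how this kernel's blocking primitives use this entry point; struct my_object and my_wait_for_event are illustrative, and the -EAGAIN timeout convention is an assumption about the waker/timeout paths, not shown in this diff:

struct my_object {
	_wait_q_t wait_q;
	int signaled;
};

static int my_wait_for_event(struct my_object *obj, s32_t timeout)
{
	u32_t key = irq_lock();

	if (obj->signaled) {
		obj->signaled = 0;
		irq_unlock(key);
		return 0;
	}

	/* Records pending_current (under TIMESLICING + SWAP_NONATOMIC),
	 * pends _current on the wait_q, and swaps out.  The return
	 * value is whatever the waker set as the swap return value, or
	 * the timeout result (e.g. -EAGAIN) if the timeout fires first.
	 */
	return _pend_current_thread(key, &obj->wait_q, timeout);
}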