Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
5f0852c
Add spinlock implementation using RV32A atomics
visitorckw Jun 25, 2025
e26b7c9
Replace interrupt masking with spinlock in malloc for SMP support
visitorckw Jun 25, 2025
30330c2
Replace interrupt masking with spinlock in mqueue for SMP support
visitorckw Jun 25, 2025
9683192
Replace interrupt masking with spinlock in task management for SMP su…
visitorckw Jun 25, 2025
779dc0b
Replace interrupt masking with spinlock in pipe for SMP support
visitorckw Jun 25, 2025
05b0342
Replace interrupt masking with spinlock in semaphore for SMP support
visitorckw Jun 25, 2025
7ea07a7
Replace interrupt masking with spinlock in timer for SMP support
visitorckw Jun 25, 2025
8ba911a
Replace interrupt masking with spinlock in mutex for SMP support
visitorckw Jun 25, 2025
1686929
Protect printf with spinlock to prevent interleaved output on SMP
visitorckw Jun 29, 2025
a23914c
Remove obsolete NOSCHED_ENTER/LEAVE and CRITICAL_ENTER/LEAVE macros
visitorckw Jun 25, 2025
f556bc0
Add per-hart stack allocation in RISC-V boot for SMP support
visitorckw Jun 29, 2025
af31ca4
Remove hart parking and add spinlock synchronization during boot for SMP
visitorckw Jun 27, 2025
f23c962
Move task_lock spinlock into kcb struct
visitorckw Jun 29, 2025
f673a71
Add idle task per hart at boot to prevent panic on no runnable tasks
visitorckw Jun 29, 2025
d08fe87
Use per-hart current task pointer in KCB
visitorckw Jun 29, 2025
be43db3
Protect shared kcb->ticks with spinlock
visitorckw Jun 29, 2025
574ddb7
Adapt mtimecmp read/write for per-hart registers
visitorckw Jun 29, 2025
49f9119
Add -smp 4 to QEMU run command for multi-core simulation
visitorckw Jun 25, 2025
aad0525
Add spinlock protection for kcb
visitorckw Jul 1, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions arch/riscv/boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
extern uint32_t _gp, _stack, _end;
extern uint32_t _sbss, _ebss;

#define STACK_SIZE_PER_HART 524288

/* C entry points */
void main(void);
void do_trap(uint32_t cause, uint32_t epc);
Expand All @@ -29,6 +31,12 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void)
/* Initialize Global Pointer (gp) and Stack Pointer (sp) */
"la gp, _gp\n"
"la sp, _stack\n"
/* Set up stack for each hart */
"csrr t0, mhartid\n" /* t0 = hartid */
"la t1, _stack_top\n" /* t1 = base address of full stack region (top) */
"li t2, %2\n" /* t2 = per-hart stack size */
"mul t0, t0, t2\n" /* t0 = hartid * STACK_SIZE_PER_HART */
"sub sp, t1, t0\n" /* sp = _stack_top - hartid * stack_size */

/* Initialize Thread Pointer (tp). The ABI requires tp to point to
* a 64-byte aligned memory region for thread-local storage. Here, we
Expand Down Expand Up @@ -62,10 +70,6 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void)
"csrw mideleg, zero\n" /* No interrupt delegation to S-mode */
"csrw medeleg, zero\n" /* No exception delegation to S-mode */

/* Park secondary harts (cores) - only hart 0 continues */
"csrr t0, mhartid\n"
"bnez t0, .Lpark_hart\n"

/* Set the machine trap vector (mtvec) to point to our ISR */
"la t0, _isr\n"
"csrw mtvec, t0\n"
Expand All @@ -79,17 +83,14 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void)
"csrw mie, t0\n"

/* Jump to the C-level main function */
"csrr a0, mhartid\n"
"call main\n"

/* If main() ever returns, it is a fatal error */
"call hal_panic\n"

".Lpark_hart:\n"
"wfi\n"
"j .Lpark_hart\n"

: /* no outputs */
: "i"(MSTATUS_MPP_MACH), "i"(MIE_MEIE)
: "i"(MSTATUS_MPP_MACH), "i"(MIE_MEIE), "i"(STACK_SIZE_PER_HART)
: "memory");
}

Expand Down
6 changes: 3 additions & 3 deletions arch/riscv/build.mk
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@ DEFINES := -DF_CPU=$(F_CLK) \
-DF_TIMER=$(F_TICK) \
-include config.h

ASFLAGS = -march=rv32imzicsr -mabi=ilp32
ASFLAGS = -march=rv32imazicsr -mabi=ilp32
CFLAGS += -Wall -Wextra -Wshadow -Wno-unused-parameter -Werror
CFLAGS += -O2 -std=gnu99
CFLAGS += -march=rv32imzicsr -mabi=ilp32
CFLAGS += -march=rv32imazicsr -mabi=ilp32
CFLAGS += -mstrict-align -ffreestanding -nostdlib -fomit-frame-pointer
CFLAGS += $(INC_DIRS) $(DEFINES) -fdata-sections -ffunction-sections
ARFLAGS = r
Expand Down Expand Up @@ -49,4 +49,4 @@ $(BUILD_KERNEL_DIR)/%.o: $(ARCH_DIR)/%.c | $(BUILD_DIR)

run:
@$(call notice, Ready to launch Linmo kernel + application.)
$(Q)qemu-system-riscv32 -machine virt -nographic -bios none -kernel $(BUILD_DIR)/image.elf -nographic
$(Q)qemu-system-riscv32 -smp 4 -machine virt -nographic -bios none -kernel $(BUILD_DIR)/image.elf
20 changes: 12 additions & 8 deletions arch/riscv/hal.c
Original file line number Diff line number Diff line change
Expand Up @@ -136,11 +136,14 @@ static inline uint64_t mtime_r(void)
/* Safely read the 64-bit 'mtimecmp' register for the calling hart.
 *
 * Each hart has its own mtimecmp pair in the CLINT: low word at
 * CLINT_BASE + 0x4000 + 8 * hartid, high word 4 bytes above it.
 * Because the two 32-bit words cannot be read atomically, the high
 * word is re-read until it is stable, so a carry between the low and
 * high reads cannot produce a torn 64-bit value.
 */
static inline uint64_t mtimecmp_r(void)
{
    const uint32_t hartid = read_csr(mhartid);
    volatile uint32_t *cmp_lo =
        (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid);
    volatile uint32_t *cmp_hi =
        (volatile uint32_t *) (CLINT_BASE + 0x4004u + 8 * hartid);
    uint32_t hi, lo;

    do {
        hi = *cmp_hi;
        lo = *cmp_lo;
    } while (hi != *cmp_hi);
    return CT64(hi, lo);
}

Expand All @@ -157,10 +160,11 @@ static inline void mtimecmp_w(uint64_t val)
/* Disable timer interrupts during the critical section */
uint32_t old_mie = read_csr(mie);
write_csr(mie, old_mie & ~MIE_MTIE);
uint32_t hartid = read_csr(mhartid);

MTIMECMP_L = 0xFFFFFFFF; /* Set to maximum to prevent spurious interrupt */
MTIMECMP_H = (uint32_t) (val >> 32); /* Set high word */
MTIMECMP_L = (uint32_t) val; /* Set low word to final value */
* (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid) = 0xFFFFFFFF; /* Set to maximum to prevent spurious interrupt */
* (volatile uint32_t *) (CLINT_BASE + 0x4004u + 8 * hartid) = (uint32_t) (val >> 32); /* Set high word */
* (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid) = (uint32_t) val; /* Set low word to final value */

/* Re-enable timer interrupts if they were previously enabled */
write_csr(mie, old_mie);
Expand Down Expand Up @@ -320,7 +324,7 @@ void hal_timer_disable(void)
*/
void hal_interrupt_tick(void)
{
tcb_t *task = kcb->task_current->data;
tcb_t *task = get_task_current(kcb)->data;
if (unlikely(!task))
hal_panic(); /* Fatal error - invalid task state */

Expand Down
74 changes: 74 additions & 0 deletions arch/riscv/spinlock.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
#pragma once

#include <hal.h>

/* Spinlock structure.
 *
 * 'lock' is 0 when free and 1 when held.  It is only ever written via
 * the __sync_lock_* GCC atomic builtins; plain (volatile) reads are
 * used for the relaxed inner spin in spin_lock().
 */
typedef struct {
    volatile uint32_t lock;
} spinlock_t;

/* Static initializer for an unlocked spinlock. */
#define SPINLOCK_INITIALIZER { 0 }

/* Save and restore interrupt state */
static inline uint32_t intr_save(void)
{
uint32_t mstatus_val = read_csr(mstatus);
_di();
return mstatus_val;
}

static inline void intr_restore(uint32_t mstatus_val)
{
write_csr(mstatus, mstatus_val);
}

/* CPU relax hint for busy-wait loops.
 *
 * Base RV32 has no dedicated pause hint (Zihintpause is not assumed by
 * the -march used here), so a plain 'nop' is issued to throttle the
 * spin slightly without any architectural side effect.
 */
static inline void cpu_relax(void)
{
    asm volatile("nop");
}

/* Basic spinlock API */
/* Acquire 'lock', busy-waiting until it becomes available.
 *
 * Test-and-test-and-set: the atomic swap is attempted first; on
 * failure the loop spins on a plain read of the lock word and only
 * retries the swap once the lock looks free, reducing coherence
 * traffic while the lock is contended.
 */
static inline void spin_lock(spinlock_t *lock)
{
    for (;;) {
        if (!__sync_lock_test_and_set(&lock->lock, 1))
            return;
        while (lock->lock)
            cpu_relax();
    }
}

/* Release 'lock'.
 * __sync_lock_release stores 0 with release semantics, so writes made
 * inside the critical section are visible before the lock is freed.
 */
static inline void spin_unlock(spinlock_t *lock)
{
    __sync_lock_release(&lock->lock);
}

/* Attempt to acquire 'lock' without spinning.
 * Returns nonzero if the lock was taken, 0 if it was already held. */
static inline int spin_trylock(spinlock_t *lock)
{
    return !__sync_lock_test_and_set(&lock->lock, 1);
}

/* IRQ-safe spinlock (no state saving)
 *
 * Disables interrupts, then takes the lock.  The matching
 * spin_unlock_irq() re-enables interrupts unconditionally, so this
 * pair must not be used where interrupts may already be disabled
 * (no nesting) — use the irqsave/irqrestore variants there instead.
 */
static inline void spin_lock_irq(spinlock_t *lock)
{
    _di();
    spin_lock(lock);
}

/* Release 'lock', then unconditionally re-enable interrupts.
 * The unlock happens before _ei() so the critical section is fully
 * closed before an interrupt can be taken.
 */
static inline void spin_unlock_irq(spinlock_t *lock)
{
    spin_unlock(lock);
    _ei();
}

/* IRQ-safe spinlock (with state saving)
 *
 * Saves the caller's interrupt state into *flags, disables interrupts,
 * then takes the lock.  Unlike spin_lock_irq(), this pair is safe when
 * interrupts may already be disabled, since the prior state is
 * restored rather than forced on.
 */
static inline void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags)
{
    *flags = intr_save();
    spin_lock(lock);
}

/* Release 'lock', then restore the interrupt state previously captured
 * by spin_lock_irqsave(). */
static inline void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags)
{
    spin_unlock(lock);
    intr_restore(flags);
}
62 changes: 20 additions & 42 deletions include/sys/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
*/

#include <hal.h>
#include <spinlock.h>
#include <lib/list.h>
#include <lib/queue.h>

Expand Down Expand Up @@ -84,6 +85,8 @@ typedef struct tcb {
void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
} tcb_t;

#define MAX_HARTS 8

/* Kernel Control Block (KCB)
*
* Singleton structure holding global kernel state, including task lists,
Expand All @@ -92,7 +95,7 @@ typedef struct tcb {
typedef struct {
/* Task Management */
list_t *tasks; /* Master list of all tasks (nodes contain tcb_t) */
list_node_t *task_current; /* Node of currently running task */
list_node_t *task_current[MAX_HARTS]; /* Node of currently running task */
jmp_buf context; /* Saved context of main kernel thread before scheduling */
uint16_t next_tid; /* Monotonically increasing ID for next new task */
uint16_t task_count; /* Cached count of active tasks for quick access */
Expand All @@ -104,6 +107,9 @@ typedef struct {
/* Timer Management */
list_t *timer_list; /* List of active software timers */
volatile uint32_t ticks; /* Global system tick, incremented by timer */
/* Timers */

spinlock_t kcb_lock;
} kcb_t;

/* Global pointer to the singleton Kernel Control Block */
Expand All @@ -117,49 +123,21 @@ extern kcb_t *kcb;
#define TASK_CACHE_SIZE \
4 /* Task lookup cache size for frequently accessed tasks */

/* Critical Section Macros
*
* Two levels of protection are provided:
* 1. CRITICAL_* macros disable ALL maskable interrupts globally
* 2. NOSCHED_* macros disable ONLY the scheduler timer interrupt
*/
/* Core Kernel and Task Management API */

/* Disable/enable ALL maskable interrupts globally.
* Provides strongest protection against concurrency from both other tasks
* and all ISRs. Use when modifying data shared with any ISR.
* WARNING: Increases interrupt latency - use NOSCHED macros if protection
* is only needed against task preemption.
*/
#define CRITICAL_ENTER() \
do { \
if (kcb->preemptive) \
_di(); \
} while (0)

#define CRITICAL_LEAVE() \
do { \
if (kcb->preemptive) \
_ei(); \
} while (0)

/* Disable/enable ONLY the scheduler timer interrupt.
* Lighter-weight critical section that prevents task preemption but allows
* other hardware interrupts (e.g., UART) to be serviced, minimizing latency.
* Use when protecting data shared between tasks.
*/
#define NOSCHED_ENTER() \
do { \
if (kcb->preemptive) \
hal_timer_disable(); \
} while (0)

#define NOSCHED_LEAVE() \
do { \
if (kcb->preemptive) \
hal_timer_enable(); \
} while (0)
/* Return the list node of the task currently running on the calling
 * hart, selected by the mhartid CSR.
 *
 * NOTE(review): declared with an empty parameter list '()' rather than
 * '(void)'; at least one call site in this change passes 'kcb' as an
 * argument, which only compiles because '()' is an unprototyped
 * declaration in C.  The definition and its callers should be made
 * consistent ('(void)' here, no argument at the call sites).
 */
static inline list_node_t *get_task_current()
{
    const uint32_t mhartid = read_csr(mhartid);

    return kcb->task_current[mhartid];
}

/* Record 'task' as the currently running task for the calling hart,
 * indexed by the mhartid CSR. */
static inline void set_task_current(list_node_t *task)
{
    kcb->task_current[read_csr(mhartid)] = task;
}

/* System Control Functions */

Expand Down
58 changes: 45 additions & 13 deletions kernel/main.c
Original file line number Diff line number Diff line change
@@ -1,9 +1,19 @@
#include <hal.h>
#include <spinlock.h>
#include <lib/libc.h>
#include <sys/task.h>

#include "private/error.h"

/* Per-hart idle task: parks the hart in a wait-for-interrupt loop so
 * the scheduler always has at least one runnable task on every hart. */
static void idle_task(void)
{
    for (;;)
        mo_task_wfi();
}

static volatile bool finish = false;
static spinlock_t finish_lock = SPINLOCK_INITIALIZER;

/* C-level entry point for the kernel.
*
* This function is called from the boot code ('_entry'). It is responsible for
Expand All @@ -12,42 +22,64 @@
*
* Under normal operation, this function never returns.
*/
int32_t main(void)
int32_t main(int32_t hartid)
{
/* Initialize hardware abstraction layer and memory heap. */
hal_hardware_init();

printf("Linmo kernel is starting...\n");
if (hartid == 0) {
printf("Linmo kernel is starting...\n");

mo_heap_init((void *) &_heap_start, (size_t) &_heap_size);
printf("Heap initialized, %u bytes available\n",
(unsigned int) (size_t) &_heap_size);

mo_heap_init((void *) &_heap_start, (size_t) &_heap_size);
printf("Heap initialized, %u bytes available\n",
(unsigned int) (size_t) &_heap_size);
/* Call the application's main entry point to create initial tasks. */
kcb->preemptive = (bool) app_main();
printf("Scheduler mode: %s\n",
kcb->preemptive ? "Preemptive" : "Cooperative");

/* Call the application's main entry point to create initial tasks. */
kcb->preemptive = (bool) app_main();
printf("Scheduler mode: %s\n",
kcb->preemptive ? "Preemptive" : "Cooperative");
spin_lock(&finish_lock);
finish = true;
spin_unlock(&finish_lock);
}

/* Make sure hardware initialize before running the first task. */
while (1) {
spin_lock(&finish_lock);
if (finish)
break;
spin_unlock(&finish_lock);
}
spin_unlock(&finish_lock);

mo_task_spawn(idle_task, DEFAULT_STACK_SIZE);

/* Verify that the application created at least one task.
* If 'kcb->task_current' is still NULL, it means mo_task_spawn was never
* If 'get_task_current()' is still NULL, it means mo_task_spawn was never
* successfully called.
*/
if (!kcb->task_current)
if (!get_task_current())
panic(ERR_NO_TASKS);

/* Save the kernel's context. This is a formality to establish a base
* execution context before launching the first real task.
*/
setjmp(kcb->context);

spin_lock(&finish_lock);

/* Launch the first task.
* 'kcb->task_current' was set by the first call to mo_task_spawn.
* 'get_task_current()' was set by the first call to mo_task_spawn.
* This function transfers control and does not return.
*/
tcb_t *first_task = kcb->task_current->data;

tcb_t *first_task = get_task_current()->data;
if (!first_task)
panic(ERR_NO_TASKS);

spin_unlock(&finish_lock);

hal_dispatch_init(first_task->context);

/* This line should be unreachable. */
Expand Down
Loading