diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a768151bf389a..e10c286a83301 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -361,6 +361,7 @@ config RISCV_PMP select THREAD_LOCAL_STORAGE if USERSPACE select ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS select MEM_DOMAIN_ISOLATED_STACKS + select PMP_KERNEL_MODE_DYNAMIC if MEM_ATTR help MCU implements Physical Memory Protection. @@ -414,6 +415,7 @@ endif #RISCV_PMP config PMP_STACK_GUARD def_bool y depends on HW_STACK_PROTECTION + select PMP_KERNEL_MODE_DYNAMIC if MULTITHREADING config PMP_STACK_GUARD_MIN_SIZE int "Stack Guard area size" @@ -430,6 +432,12 @@ config PMP_STACK_GUARD_MIN_SIZE wiggle room to accommodate the eventual overflow exception stack usage. +config PMP_KERNEL_MODE_DYNAMIC + bool + help + Enable this to dynamically reconfigure and activate PMP entries for + Machine mode when switching between kernel (ISR, syscall) and thread contexts. + # Implement the null pointer detection using the Physical Memory Protection # (PMP) Unit. config NULL_POINTER_EXCEPTION_DETECTION_PMP diff --git a/arch/riscv/core/fatal.c b/arch/riscv/core/fatal.c index de94363051a77..1f40c81fc37e6 100644 --- a/arch/riscv/core/fatal.c +++ b/arch/riscv/core/fatal.c @@ -221,12 +221,12 @@ void z_riscv_fault(struct arch_esf *esf) unsigned int reason = K_ERR_CPU_EXCEPTION; if (bad_stack_pointer(esf)) { -#ifdef CONFIG_PMP_STACK_GUARD +#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) /* * Remove the thread's PMP setting to prevent triggering a stack * overflow error again due to the previous configuration. */ - z_riscv_pmp_stackguard_disable(); + z_riscv_pmp_kernelmode_disable(); #endif /* CONFIG_PMP_STACK_GUARD && CONFIG_MULTITHREADING */ reason = K_ERR_STACK_CHK_FAIL; } diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S index 6eeaaa7d26993..dfd68660ec0ca 100644 --- a/arch/riscv/core/isr.S +++ b/arch/riscv/core/isr.S @@ -368,19 +368,19 @@ no_fp: /* increment _current->arch.exception_depth */ li t1, RISCV_EXC_ECALLU beq t0, t1, is_user_syscall -#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /* * Determine if we come from user space. If so, reconfigure the PMP for - * kernel mode stack guard. + * kernel mode configuration. */ csrr t0, mstatus li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f lr a0, ___cpu_t_current_OFFSET(s0) - call z_riscv_pmp_stackguard_enable + call z_riscv_pmp_kernelmode_enable 1: -#endif /* CONFIG_PMP_STACK_GUARD */ +#endif /* CONFIG_PMP_KERNEL_MODE_DYNAMIC */ #endif /* CONFIG_USERSPACE */ @@ -422,7 +422,7 @@ is_kernel_syscall: addi t0, t0, 4 sr t0, __struct_arch_esf_mepc_OFFSET(sp) -#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /* Re-activate PMP for m-mode */ li t1, MSTATUS_MPP csrc mstatus, t1 @@ -515,13 +515,13 @@ do_irq_offload: #ifdef CONFIG_USERSPACE is_user_syscall:
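For readers skimming the assembly: a rough C rendering of the mstatus.MPP test performed in the hunk above (csr_read(), MSTATUS_MPP and _current_cpu are existing Zephyr symbols; the helper itself is illustrative, not part of the patch):

```c
/* Sketch: reprogram the PMP for kernel mode only when the trap was
 * taken from U-mode, i.e. when mstatus.MPP reads 0 after trap entry. */
static inline void pmp_enter_kernel_mode_if_from_user(void)
{
	if ((csr_read(mstatus) & MSTATUS_MPP) == 0) {
		z_riscv_pmp_kernelmode_enable(_current_cpu->current);
	}
}
```

The u-mode syscall path below calls z_riscv_pmp_kernelmode_enable unconditionally, since an ECALL taken as RISCV_EXC_ECALLU can only have come from user space.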
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /* * We came from userspace and need to reconfigure the - * PMP for kernel mode stack guard. + * PMP for kernel mode configuration. */ lr a0, ___cpu_t_current_OFFSET(s0) - call z_riscv_pmp_stackguard_enable + call z_riscv_pmp_kernelmode_enable #endif /* It is safe to re-enable IRQs now */ @@ -585,18 +585,18 @@ valid_syscall_id: is_interrupt: -#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC #ifdef CONFIG_USERSPACE /* * If we came from userspace then we need to reconfigure the - * PMP for kernel mode stack guard. + * PMP for kernel mode configuration. */ lr t0, __struct_arch_esf_mstatus_OFFSET(sp) li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f lr a0, ___cpu_t_current_OFFSET(s0) - call z_riscv_pmp_stackguard_enable + call z_riscv_pmp_kernelmode_enable j 2f #endif /* CONFIG_USERSPACE */ 1: /* Re-activate PMP for m-mode */ @@ -769,8 +769,8 @@ fp_trap_exit: and t0, t2, t1 bnez t0, 1f -#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) - /* Remove kernel stack guard and Reconfigure PMP for user mode */ +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC + /* Remove kernel mode configuration and reconfigure PMP for user mode */ lr a0, ___cpu_t_current_OFFSET(s0) call z_riscv_pmp_usermode_enable #endif diff --git a/arch/riscv/core/pmp.c b/arch/riscv/core/pmp.c index 182302d75cd65..7d1598a20e567 100644 --- a/arch/riscv/core/pmp.c +++ b/arch/riscv/core/pmp.c @@ -15,22 +15,26 @@ * cycles on corresponding CSR registers. Relevant CSR registers are always * written in batch from their shadow copy in RAM for better efficiency. * - * In the stackguard case we keep an m-mode copy for each thread. Each user + * In the kernel mode case we keep an m-mode copy for each thread. Each user * mode thread also has a u-mode copy. This makes context switching faster * as precomputed content just has to be written to actual registers with * no additional processing. * * Thread-specific m-mode and u-mode PMP entries start from the PMP slot - * indicated by global_pmp_end_index. Lower slots are used by global entries - * which are never modified. + * indicated by global_pmp_end_index[M_MODE] and global_pmp_end_index[U_MODE], + * respectively. Lower slots are used by global entries which are never + * modified. */ +#include "zephyr/toolchain.h" #include <zephyr/kernel.h> #include <kernel_internal.h> #include <zephyr/linker/linker-defs.h> #include <pmp.h> #include <zephyr/arch/arch_interface.h> #include <zephyr/arch/riscv/csr.h> +#include <zephyr/mem_mgmt/mem_attr.h> +#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h> #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL #include <zephyr/logging/log.h> @@ -54,6 +58,45 @@ LOG_MODULE_REGISTER(mpu); #define PMP_NONE 0
+/** + * @brief Decodes PMP configuration and address registers into a memory region's + * start/end addresses. + * + * @param cfg_byte The PMP configuration byte (pmpcfg_n). + * @param pmp_addr A pointer to the full array of PMP address registers (pmpaddr_n). + * @param index The current PMP entry index. + * @param start Pointer to where the calculated start address should be stored. + * @param end Pointer to where the calculated end address should be stored. + */ +IF_DISABLED(CONFIG_ZTEST, (static)) +void pmp_decode_region(uint8_t cfg_byte, unsigned long *pmp_addr, unsigned int index, + unsigned long *start, unsigned long *end) +{ + unsigned long tmp; + unsigned long pmp_addr_val = pmp_addr[index]; + unsigned long pmp_prev_addr_val = (index == 0) ? 0 : pmp_addr[index - 1]; + + switch (cfg_byte & PMP_A) { + case PMP_TOR: + *start = (index == 0) ? 0 : (pmp_prev_addr_val << 2); + *end = (pmp_addr_val << 2) - 1; + break; + case PMP_NA4: + *start = pmp_addr_val << 2; + *end = *start + 3; + break; + case PMP_NAPOT: + tmp = (pmp_addr_val << 2) | 0x3; + *start = tmp & (tmp + 1); + *end = tmp | (tmp + 1); + break; + default: + *start = 0; + *end = 0; + break; + } +} + static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end, unsigned long *pmp_addr, unsigned long *pmp_cfg, const char *banner) @@ -63,27 +106,9 @@ static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end, LOG_DBG("PMP %s:", banner); for (index = pmp_start; index < pmp_end; index++) { - unsigned long start, end, tmp; + unsigned long start, end; - switch (pmp_n_cfg[index] & PMP_A) { - case PMP_TOR: - start = (index == 0) ? 0 : (pmp_addr[index - 1] << 2); - end = (pmp_addr[index] << 2) - 1; - break; - case PMP_NA4: - start = pmp_addr[index] << 2; - end = start + 3; - break; - case PMP_NAPOT: - tmp = (pmp_addr[index] << 2) | 0x3; - start = tmp & (tmp + 1); - end = tmp | (tmp + 1); - break; - default: - start = 0; - end = 0; - break; - } + pmp_decode_region(pmp_n_cfg[index], pmp_addr, index, &start, &end); if (end == 0) { LOG_DBG("%3d: "PR_ADDR" 0x%02x", index, @@ -112,7 +137,8 @@ static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end, * @param pmp_cfg Pointer to the array where the CSR contents will be stored. * @param pmp_cfg_size The size of the pmp_cfg array, measured in unsigned long entries. */ -static inline void z_riscv_pmp_read_config(unsigned long *pmp_cfg, size_t pmp_cfg_size) +IF_DISABLED(CONFIG_ZTEST, (static inline)) +void z_riscv_pmp_read_config(unsigned long *pmp_cfg, size_t pmp_cfg_size) { __ASSERT(pmp_cfg_size == (size_t)(CONFIG_PMP_SLOTS / PMPCFG_STRIDE), "Invalid PMP config array size"); @@ -178,7 +204,8 @@ static inline void z_riscv_pmp_write_config(unsigned long *pmp_cfg, size_t pmp_c * @param pmp_addr Pointer to the array where the CSR contents will be stored. * @param pmp_addr_size The size of the pmp_addr array, measured in unsigned long entries. */ -static inline void z_riscv_pmp_read_addr(unsigned long *pmp_addr, size_t pmp_addr_size) +IF_DISABLED(CONFIG_ZTEST, (static inline)) +void z_riscv_pmp_read_addr(unsigned long *pmp_addr, size_t pmp_addr_size) { __ASSERT(pmp_addr_size == (size_t)(CONFIG_PMP_SLOTS), "PMP address array size mismatch"); @@ -270,7 +297,7 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm, return ok; } -#ifdef CONFIG_PMP_STACK_GUARD +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC static inline bool set_pmp_mprv_catchall(unsigned int *index_p, unsigned long *pmp_addr, unsigned long *pmp_cfg, unsigned int index_limit) @@ -298,7 +325,7 @@ static inline bool set_pmp_mprv_catchall(unsigned int *index_p, return ok; } -#endif /* CONFIG_PMP_STACK_GUARD */ +#endif /* CONFIG_PMP_KERNEL_MODE_DYNAMIC */ /** * @brief Write a range of PMP entries to corresponding PMP registers @@ -382,6 +409,58 @@ static void write_pmp_entries(unsigned int start, unsigned int end, pmp_addr, pmp_cfg); }
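A worked example of the NAPOT arithmetic in pmp_decode_region(), using the helper as it is exposed to ZTEST builds; the register value is illustrative:

```c
/* pmpaddr = 0x20003FFF: the 14 trailing ones encode log2(size) - 3,
 * i.e. a 0x20000-byte naturally aligned region based at 0x80000000. */
unsigned long pmp_addr[1] = { 0x20003FFF };
unsigned long start, end;

pmp_decode_region(PMP_R | PMP_W | PMP_NAPOT, pmp_addr, 0, &start, &end);
/* tmp   = (0x20003FFF << 2) | 0x3 = 0x8000FFFF
 * start = tmp & (tmp + 1)         = 0x80000000
 * end   = tmp | (tmp + 1)         = 0x8001FFFF */
```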
+#ifdef CONFIG_MEM_ATTR +/** + * @brief Install PMP entries from devicetree mem-attr regions. + * + * Iterates over devicetree-provided memory-attr regions and programs PMP + * via set_pmp_entry(). Ordering matters because PMP checks entries from lowest + * to highest index and uses the first entry that matches the address. + * + * @param index_p Location of the current PMP slot index to use. This index + * will be updated according to the number of slots used. + * @param pmp_addr Array of pmpaddr values (starting at entry 0). + * @param pmp_cfg Array of pmpcfg values (starting at entry 0). + * @param index_limit Index value representing the size of the provided arrays. + * @return Number of PMP slots consumed by installed mem-attr regions. + * + * @note DT_MEM_RISCV_TYPE_IO_X limitation: + * Since the current PMP entries are non-locked, the eXecute (X) + * permission restriction applied by DT_MEM_RISCV_TYPE_IO_X does + * not prevent execution in higher privilege modes (M-mode/kernel). + * This is because the mstatus.MPRV register bit only affects + * M-mode load/store operations, not instruction fetches. + * The execute restriction still applies to User mode because PMP + * is always enforced for lower privilege modes. + */ +static unsigned int set_pmp_mem_attr(unsigned int *index_p, + unsigned long *pmp_addr, unsigned long *pmp_cfg, + unsigned int index_limit) +{ + const struct mem_attr_region_t *region; + unsigned int entry_cnt = *index_p; + size_t num_regions; + + num_regions = mem_attr_get_regions(&region); + + for (size_t idx = 0; idx < num_regions; ++idx) { + + uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr); + + if (perm || (region[idx].dt_attr & DT_MEM_RISCV_TYPE_EMPTY)) { + set_pmp_entry(index_p, perm, + (uintptr_t)(region[idx].dt_addr), + (size_t)(region[idx].dt_size), + pmp_addr, pmp_cfg, index_limit); + } + } + + entry_cnt = *index_p - entry_cnt; + + return entry_cnt; +} +#endif /* CONFIG_MEM_ATTR */ + /** * @brief Abstract the last 3 arguments to set_pmp_entry() and * write_pmp_entries() for m-mode. @@ -402,16 +481,34 @@ static void write_pmp_entries(unsigned int start, unsigned int end, /* * Stores the initial values of the pmpcfg CSRs, covering all global - * m-mode PMP entries. This array is sized to hold all pmpcfg registers - * necessary for CONFIG_PMP_SLOTS. It is used to seed the per-thread - * PMP configuration copies. Locked entries aren't modifiable but - * we could have non-locked entries here too. + * m-mode and u-mode PMP entries. This array is sized to hold all pmpcfg + * registers necessary for CONFIG_PMP_SLOTS. It is used to seed the + * per-thread PMP configuration copies. Locked entries aren't modifiable + * but we could have non-locked entries here too. */ static unsigned long global_pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE]; -static unsigned long global_pmp_last_addr; -/* End of global PMP entry range */ -static unsigned int global_pmp_end_index; +/* + * Defines an enumeration for PMP operating modes (M or U) to index the + * PMP state arrays. + */ +enum pmp_mode { + M_MODE = 0, +#if defined(CONFIG_USERSPACE) + U_MODE, +#endif /* CONFIG_USERSPACE */ + MODE_TOTAL +}; + +/* Last address of global PMP entry range for each mode (M or U). */ +static unsigned long global_pmp_last_addr[MODE_TOTAL]; +/* End of global PMP entry range for each mode (M or U). */ +static unsigned int global_pmp_end_index[MODE_TOTAL];
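The filter used by set_pmp_mem_attr() above, condensed into a predicate (hypothetical helper name, not part of the patch):

```c
/* A region gets a PMP entry when it carries R/W/X permissions, or when
 * it is explicitly EMPTY, in which case perm stays 0 (PMP_NONE) and the
 * range is covered by a no-access entry. */
static bool mem_attr_needs_pmp_entry(uint32_t dt_attr)
{
	uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(dt_attr);

	return (perm != 0) || ((dt_attr & DT_MEM_RISCV_TYPE_EMPTY) != 0);
}
```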
+#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE) +/* Stores the initial pmpaddr values for the memory attribute regions. */ +static unsigned long mem_attr_pmp_addr[CONFIG_PMP_SLOTS]; +#endif /** * @brief Initialize the PMP with global entries on each CPU */ @@ -419,8 +516,11 @@ static unsigned int global_pmp_end_index; void z_riscv_pmp_init(void) { unsigned long pmp_addr[CONFIG_PMP_SLOTS]; - unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE]; + unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE] = {0}; unsigned int index = 0; + unsigned int attr_cnt = 0; + + ARG_UNUSED(attr_cnt); #ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP /* * @@ -450,23 +550,6 @@ void z_riscv_pmp_init(void) (uintptr_t)z_interrupt_stacks[_current_cpu->id], Z_RISCV_STACK_GUARD_SIZE, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); - - /* - * This early, the kernel init code uses the IRQ stack and we want to - * safeguard it as soon as possible. But we need a temporary default - * "catch all" PMP entry for MPRV to work. Later on, this entry will - * be set for each thread by z_riscv_pmp_stackguard_prepare(). - */ - set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); - - /* Write those entries to PMP regs. */ - write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); - - /* Activate our non-locked PMP entries for m-mode */ - csr_set(mstatus, MSTATUS_MPRV); - - /* And forget about that last entry as we won't need it later */ - index--; #else /* Without multithreading setup stack guards for IRQ and main stacks */ set_pmp_entry(&index, PMP_NONE | PMP_L, (uintptr_t)z_interrupt_stacks[_current_cpu->id], Z_RISCV_STACK_GUARD_SIZE, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); @@ -479,11 +562,39 @@ Z_RISCV_STACK_GUARD_SIZE, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); +#endif /* CONFIG_MULTITHREADING */ +#ifdef CONFIG_SMP + unsigned int irq_index = index; +#endif /* CONFIG_SMP */ +#endif + +#ifdef CONFIG_MEM_ATTR + /* + * Set the memory attribute regions as temporary PMP entries for early + * kernel initialization. This provides essential protection before + * the kernel mode memory attribute permission handling is fully + * operational. + */ + attr_cnt = set_pmp_mem_attr(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); + + /* + * This early, we want the non-locked PMP entries to take effect as + * soon as possible. But we need a temporary default "catch all" PMP + * entry for MPRV to work. Later on, this entry will be set for each + * thread by z_riscv_pmp_kernelmode_prepare(). + */ + set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); + /* Write those entries to PMP regs. */ write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); -#endif /* CONFIG_MULTITHREADING */ + + /* Activate our non-locked PMP entries for m-mode */ + csr_clear(mstatus, MSTATUS_MPP); + csr_set(mstatus, MSTATUS_MPRV); + + /* And forget about that last entry as we won't need it later */ + index--; #else - /* Write those entries to PMP regs. */ + /* Write those entries to PMP regs. */ write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); #endif
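Why the added init code clears MPP before setting MPRV — a sketch of the privileged-spec semantics the patch relies on, consistent with the @note on set_pmp_mem_attr():

```c
/* With MPRV set, M-mode loads and stores are permission-checked at the
 * privilege level held in mstatus.MPP; instruction fetches are not
 * affected. Clearing MPP to U-mode therefore makes the non-locked PMP
 * entries apply to kernel data accesses. */
csr_clear(mstatus, MSTATUS_MPP); /* effective load/store privilege: U */
csr_set(mstatus, MSTATUS_MPRV);  /* route M-mode ld/st through PMP checks */
```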
@@ -494,21 +605,36 @@ * Make sure TOR entry sharing won't be attempted with it by * remembering a bogus address for those entries. */ - pmp_addr[index - 1] = -1L; + pmp_addr[irq_index - 1] = -1L; #endif /* Make sure secondary CPUs produced the same values */ - if (global_pmp_end_index != 0) { - __ASSERT(global_pmp_end_index == index, ""); + if (global_pmp_end_index[M_MODE] != 0) { + __ASSERT(global_pmp_end_index[M_MODE] == index, ""); __ASSERT(global_pmp_cfg[index / PMPCFG_STRIDE] == pmp_cfg[index / PMPCFG_STRIDE], ""); - __ASSERT(global_pmp_last_addr == pmp_addr[index - 1], ""); + __ASSERT(global_pmp_last_addr[M_MODE] == pmp_addr[index - 1], ""); } #endif memcpy(global_pmp_cfg, pmp_cfg, sizeof(pmp_cfg)); - global_pmp_last_addr = pmp_addr[index - 1]; - global_pmp_end_index = index; + global_pmp_last_addr[M_MODE] = pmp_addr[index - 1]; + global_pmp_end_index[M_MODE] = index; + +#ifdef CONFIG_USERSPACE + global_pmp_last_addr[U_MODE] = pmp_addr[index - attr_cnt - 1]; + global_pmp_end_index[U_MODE] = index - attr_cnt; +#endif /* CONFIG_USERSPACE */ + +#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE) + /* + * Copy the memory attribute pmpaddr entries to the global buffer. + * These kernel mode pmpaddr entries are saved for restoration when + * switching back from user mode. + */ + memcpy(mem_attr_pmp_addr, &pmp_addr[global_pmp_end_index[U_MODE]], + attr_cnt * PMPCFG_STRIDE); +#endif if (PMP_DEBUG_DUMP) { dump_pmp_regs("initial register dump"); @@ -518,9 +644,9 @@ /** * @brief Initialize the per-thread PMP register copy with global values. */ -#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_USERSPACE) -static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr, - unsigned long *pmp_cfg, +#if defined(CONFIG_PMP_KERNEL_MODE_DYNAMIC) || defined(CONFIG_USERSPACE) +static inline unsigned int z_riscv_pmp_thread_init(enum pmp_mode mode, + unsigned long *pmp_addr, unsigned long *pmp_cfg, unsigned int index_limit) { ARG_UNUSED(index_limit); @@ -534,23 +660,38 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr, * Retrieve the pmpaddr value matching the last global PMP slot. * This is so that set_pmp_entry() can safely attempt TOR with it. */ - pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr; + unsigned int pmp_end_index = global_pmp_end_index[mode]; - return global_pmp_end_index; -} + pmp_addr[pmp_end_index - 1] = global_pmp_last_addr[mode]; + +#if defined(CONFIG_MEM_ATTR) && defined(CONFIG_USERSPACE) + /* + * This block restores the PMP entries used for memory attributes (set in + * mem_attr_pmp_addr) that were overwritten when switching from user mode + * back to kernel mode. It only applies when preparing the M_MODE copy. + */ + if (mode == M_MODE) { + memcpy(&pmp_addr[global_pmp_end_index[U_MODE]], mem_attr_pmp_addr, + (global_pmp_end_index[M_MODE] - global_pmp_end_index[U_MODE]) * + PMPCFG_STRIDE); + } #endif -#ifdef CONFIG_PMP_STACK_GUARD + return pmp_end_index; +} +#endif
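In CONFIG_USERSPACE builds the two watermarks recorded during init differ by exactly the number of installed mem-attr entries, since global_pmp_end_index[U_MODE] is set to index - attr_cnt; a hypothetical sanity check of that bookkeeping:

```c
/* attr_cnt is the value returned by set_pmp_mem_attr() during init. */
__ASSERT(global_pmp_end_index[M_MODE] ==
	 global_pmp_end_index[U_MODE] + attr_cnt,
	 "mem-attr entries must fill the gap between the U/M watermarks");
```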
-#ifdef CONFIG_MULTITHREADING +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /** - * @brief Prepare the PMP stackguard content for given thread. + * @brief Prepare the PMP kernel mode content for a given thread. * * This is called once during new thread creation. */ -void z_riscv_pmp_stackguard_prepare(struct k_thread *thread) +void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread) { - unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread)); + unsigned int index = z_riscv_pmp_thread_init(M_MODE, PMP_M_MODE(thread)); + +#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING) uintptr_t stack_bottom; /* make the bottom addresses of our stack inaccessible */ @@ -561,10 +702,12 @@ void z_riscv_pmp_stackguard_prepare(struct k_thread *thread) } else if (z_stack_is_user_capable(thread->stack_obj)) { stack_bottom = thread->stack_info.start - K_THREAD_STACK_RESERVED; } -#endif +#endif /* CONFIG_USERSPACE */ set_pmp_entry(&index, PMP_NONE, stack_bottom, Z_RISCV_STACK_GUARD_SIZE, PMP_M_MODE(thread)); +#endif /* CONFIG_PMP_STACK_GUARD */ + set_pmp_mprv_catchall(&index, PMP_M_MODE(thread)); /* remember how many entries we use */ @@ -572,13 +715,13 @@ } /** - * @brief Write PMP stackguard content to actual PMP registers + * @brief Write PMP kernel mode content to actual PMP registers * * This is called on every context switch. */ -void z_riscv_pmp_stackguard_enable(struct k_thread *thread) +void z_riscv_pmp_kernelmode_enable(struct k_thread *thread) { - LOG_DBG("pmp_stackguard_enable for thread %p", thread); + LOG_DBG("pmp_kernelmode_enable for thread %p", thread); /* * Disable (non-locked) PMP entries for m-mode while we update them. @@ -588,9 +731,17 @@ csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP); /* Write our m-mode MPP entries */ - write_pmp_entries(global_pmp_end_index, thread->arch.m_mode_pmp_end_index, +#ifdef CONFIG_USERSPACE + write_pmp_entries(global_pmp_end_index[U_MODE], + thread->arch.m_mode_pmp_end_index, + false /* no need to clear to the end */, + PMP_M_MODE(thread)); +#else + write_pmp_entries(global_pmp_end_index[M_MODE], + thread->arch.m_mode_pmp_end_index, false /* no need to clear to the end */, PMP_M_MODE(thread)); +#endif /* CONFIG_USERSPACE */ if (PMP_DEBUG_DUMP) { dump_pmp_regs("m-mode register dump"); @@ -600,20 +751,18 @@ csr_set(mstatus, MSTATUS_MPRV); } -#endif /* CONFIG_MULTITHREADING */ - /** - * @brief Remove PMP stackguard content to actual PMP registers + * @brief Remove PMP kernel mode content from actual PMP registers */ -void z_riscv_pmp_stackguard_disable(void) +void z_riscv_pmp_kernelmode_disable(void) { unsigned long pmp_addr[CONFIG_PMP_SLOTS]; unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE]; - unsigned int index = global_pmp_end_index; + unsigned int index = global_pmp_end_index[M_MODE]; /* Retrieve the pmpaddr value matching the last global PMP slot. */ - pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr; + pmp_addr[index - 1] = global_pmp_last_addr[M_MODE]; /* Disable (non-locked) PMP entries for m-mode while we update them. */ csr_clear(mstatus, MSTATUS_MPRV); @@ -625,15 +774,14 @@ set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
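What the "catch all" slot amounts to — an assumption based on mainline Zephyr's set_pmp_mprv_catchall(), whose body is not shown in this diff:

```c
/* One non-locked NAPOT entry covering the entire address space with
 * R/W permissions, so MPRV'd kernel accesses keep working while no
 * per-thread entries are programmed. Illustrative rendering only. */
pmp_addr[index] = -1UL;                  /* NAPOT over the whole range */
((uint8_t *)pmp_cfg)[index] = PMP_R | PMP_W | PMP_NAPOT;
index++;
```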
/* Write "catch all" entry and clear unlocked entries to PMP regs. */ - write_pmp_entries(global_pmp_end_index, index, + write_pmp_entries(global_pmp_end_index[M_MODE], index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr)); if (PMP_DEBUG_DUMP) { dump_pmp_regs("catch all register dump"); } } - -#endif /* CONFIG_PMP_STACK_GUARD */ +#endif /* CONFIG_PMP_KERNEL_MODE_DYNAMIC */ #ifdef CONFIG_USERSPACE @@ -655,7 +803,7 @@ void z_riscv_pmp_usermode_init(struct k_thread *thread) */ void z_riscv_pmp_usermode_prepare(struct k_thread *thread) { - unsigned int index = z_riscv_pmp_thread_init(PMP_U_MODE(thread)); + unsigned int index = z_riscv_pmp_thread_init(U_MODE, PMP_U_MODE(thread)); LOG_DBG("pmp_usermode_prepare for thread %p", thread); @@ -699,8 +847,52 @@ static void resync_pmp_domain(struct k_thread *thread, continue; } - ok = set_pmp_entry(&index, part->attr.pmp_attr, - part->start, part->size, PMP_U_MODE(thread)); +#ifdef CONFIG_MEM_ATTR + /* + * Determine whether the partition is covered by a memory + * attribute region. + * + * Constraint due to the limited number of PMP entries: + * the logic asserts against any case that requires splitting + * a partition into multiple permissions, such as a partial + * overlap or the partition fully containing the memory + * attribute region without matching it exactly. + * + * Supported cases: + * 1. Partition excludes all memory attribute regions: + * The partition's permission is applied directly. + * 2. Partition is contained in a memory attribute region: + * The partition's permission is masked with the memory + * attribute. + */ + const struct mem_attr_region_t *region; + uint8_t attr_mask = PMP_R | PMP_W | PMP_X; + + for (int idx = 0; idx < mem_attr_get_regions(&region); idx++) { + uintptr_t dt_start = (uintptr_t)(region[idx].dt_addr); + uintptr_t dt_end = dt_start + (size_t)(region[idx].dt_size); + bool covered = false; + + /* No overlap at all, skip this memory region */ + if ((part->start + part->size) <= dt_start || part->start >= dt_end) { + continue; + }
/* Check if the partition is contained in the memory attribute region. */ + covered = part->start >= dt_start && (part->start + part->size) <= dt_end; + __ASSERT(covered, "partitions must not partially overlap a memory attribute region"); + + attr_mask = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr); + break; + } + + ok = set_pmp_entry(&index, part->attr.pmp_attr & attr_mask, part->start, part->size, + PMP_U_MODE(thread)); +#else + ok = set_pmp_entry(&index, part->attr.pmp_attr, part->start, part->size, + PMP_U_MODE(thread)); +#endif + __ASSERT(ok, "no PMP slot left for %d remaining partitions in domain %p", remaining_partitions + 1, domain); @@ -736,13 +928,14 @@ void z_riscv_pmp_usermode_enable(struct k_thread *thread) resync_pmp_domain(thread, domain); } -#ifdef CONFIG_PMP_STACK_GUARD +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /* Make sure m-mode PMP usage is disabled before we reprogram it */ csr_clear(mstatus, MSTATUS_MPRV); #endif /* Write our u-mode MPP entries */ - write_pmp_entries(global_pmp_end_index, thread->arch.u_mode_pmp_end_index, + write_pmp_entries(global_pmp_end_index[U_MODE], + thread->arch.u_mode_pmp_end_index, true /* must clear to the end */, PMP_U_MODE(thread)); @@ -756,7 +949,7 @@ int arch_mem_domain_max_partitions_get(void) int available_pmp_slots = CONFIG_PMP_SLOTS; /* remove those slots dedicated to global entries */ - available_pmp_slots -= global_pmp_end_index; + available_pmp_slots -= global_pmp_end_index[U_MODE]; /* * User thread stack mapping: diff --git a/arch/riscv/core/switch.S b/arch/riscv/core/switch.S index d177d92c84829..81292748f8382 100644 --- a/arch/riscv/core/switch.S +++ b/arch/riscv/core/switch.S @@ -61,16 +61,17 @@ SECTION_FUNC(TEXT, z_riscv_switch) mv a0, s0 #endif -#if defined(CONFIG_PMP_STACK_GUARD) - /* Stack guard has priority over user space for PMP usage. */ +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC + /* Kernel mode has priority over user space for PMP usage. */ mv s0, a0 - call z_riscv_pmp_stackguard_enable + call z_riscv_pmp_kernelmode_enable mv a0, s0 #elif defined(CONFIG_USERSPACE) /* - * When stackguard is not enabled, we need to configure the PMP only - * at context switch time as the PMP is not in effect while inm-mode. - * (it is done on every exception return otherwise). + * When kernel mode dynamic configuration is not enabled, we need to + * configure the PMP only at context switch time as the PMP is not + * in effect while in m-mode. (it is done on every exception return + * otherwise). */ lb t0, _thread_offset_to_user_options(a0) andi t0, t0, K_USER diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c index 773bf51479a44..5cbfa821fff39 100644 --- a/arch/riscv/core/thread.c +++ b/arch/riscv/core/thread.c @@ -89,18 +89,18 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, /* Supervisor thread */ stack_init->mepc = (unsigned long)z_thread_entry; -#if defined(CONFIG_PMP_STACK_GUARD) +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC /* Enable PMP in mstatus.MPRV mode for RISC-V machine mode * if thread is supervisor thread. */ stack_init->mstatus |= MSTATUS_MPRV; -#endif /* CONFIG_PMP_STACK_GUARD */ +#endif /* CONFIG_PMP_KERNEL_MODE_DYNAMIC */ } -#if defined(CONFIG_PMP_STACK_GUARD) - /* Setup PMP regions of PMP stack guard of thread. */ - z_riscv_pmp_stackguard_prepare(thread); -#endif /* CONFIG_PMP_STACK_GUARD */
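Returning to the constraint documented in resync_pmp_domain() (pmp.c above): the supported-overlap rule as a standalone predicate (illustrative helper, not part of the patch):

```c
/* A partition must either miss every mem-attr region entirely or be
 * fully contained in one; anything else would need the partition split
 * across multiple PMP entries and is asserted against. */
static bool partition_overlap_ok(uintptr_t p_start, size_t p_size,
				 uintptr_t r_start, size_t r_size)
{
	uintptr_t p_end = p_start + p_size;	/* exclusive */
	uintptr_t r_end = r_start + r_size;

	if (p_end <= r_start || p_start >= r_end) {
		return true;	/* case 1: no overlap at all */
	}
	/* case 2: full containment in the mem-attr region */
	return (p_start >= r_start) && (p_end <= r_end);
}
```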
+#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC + /* Set up the PMP regions of the thread's kernel mode configuration. */ + z_riscv_pmp_kernelmode_prepare(thread); +#endif /* CONFIG_PMP_KERNEL_MODE_DYNAMIC */ #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE stack_init->soc_context = soc_esf_init; @@ -176,9 +176,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, csr_write(mstatus, status); csr_write(mepc, z_thread_entry); -#ifdef CONFIG_PMP_STACK_GUARD - /* reconfigure as the kernel mode stack will be different */ - z_riscv_pmp_stackguard_prepare(_current); +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC + /* reconfigure as the kernel mode configuration will be different */ + z_riscv_pmp_kernelmode_prepare(_current); #endif /* Set up Physical Memory Protection */ diff --git a/arch/riscv/include/pmp.h b/arch/riscv/include/pmp.h index 25b3f1fa0df00..8367342bb9bb1 100644 --- a/arch/riscv/include/pmp.h +++ b/arch/riscv/include/pmp.h @@ -7,14 +7,28 @@ #ifndef PMP_H_ #define PMP_H_ +#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h> + #define PMPCFG_STRIDE (__riscv_xlen / 8) +#define DT_MEM_RISCV_TO_PMP_PERM(dt_attr) ( \ + (((dt_attr) & DT_MEM_RISCV_TYPE_IO_R) ? PMP_R : 0) | \ + (((dt_attr) & DT_MEM_RISCV_TYPE_IO_W) ? PMP_W : 0) | \ + (((dt_attr) & DT_MEM_RISCV_TYPE_IO_X) ? PMP_X : 0)) + void z_riscv_pmp_init(void); -void z_riscv_pmp_stackguard_prepare(struct k_thread *thread); -void z_riscv_pmp_stackguard_enable(struct k_thread *thread); -void z_riscv_pmp_stackguard_disable(void); +void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread); +void z_riscv_pmp_kernelmode_enable(struct k_thread *thread); +void z_riscv_pmp_kernelmode_disable(void); void z_riscv_pmp_usermode_init(struct k_thread *thread); void z_riscv_pmp_usermode_prepare(struct k_thread *thread); void z_riscv_pmp_usermode_enable(struct k_thread *thread); +#ifdef CONFIG_ZTEST +void z_riscv_pmp_read_config(unsigned long *pmp_cfg, size_t pmp_cfg_size); +void z_riscv_pmp_read_addr(unsigned long *pmp_addr, size_t pmp_addr_size); +void pmp_decode_region(uint8_t cfg_byte, unsigned long *pmp_addr, unsigned int index, + unsigned long *start, unsigned long *end); +#endif /* CONFIG_ZTEST */ + #endif /* PMP_H_ */ diff --git a/include/zephyr/arch/riscv/thread.h b/include/zephyr/arch/riscv/thread.h index 231fa784d6a28..dd8e23ad6437b 100644 --- a/include/zephyr/arch/riscv/thread.h +++ b/include/zephyr/arch/riscv/thread.h @@ -77,7 +77,7 @@ struct _thread_arch { unsigned int u_mode_pmp_end_index; unsigned int u_mode_pmp_update_nr; #endif -#ifdef CONFIG_PMP_STACK_GUARD +#ifdef CONFIG_PMP_KERNEL_MODE_DYNAMIC unsigned int m_mode_pmp_end_index; unsigned long m_mode_pmpaddr_regs[CONFIG_PMP_SLOTS]; unsigned long m_mode_pmpcfg_regs[CONFIG_PMP_SLOTS / (__riscv_xlen / 8)]; diff --git a/tests/arch/riscv/pmp/mem-attr-entries/CMakeLists.txt b/tests/arch/riscv/pmp/mem-attr-entries/CMakeLists.txt new file mode 100644 index 0000000000000..e0f392177d6f3 --- /dev/null +++ b/tests/arch/riscv/pmp/mem-attr-entries/CMakeLists.txt @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(riscv_pmp) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) + +target_include_directories(app PRIVATE + ${ZEPHYR_BASE}/kernel/include + ${ZEPHYR_BASE}/arch/${ARCH}/include + )
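For reference, the DT_MEM_RISCV_TO_PMP_PERM() helper added to pmp.h above expands as follows (PMP_R/PMP_W/PMP_X are the standard pmpcfg permission bits):

```c
uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_R |
					DT_MEM_RISCV_TYPE_IO_W);
/* perm == (PMP_R | PMP_W); the X bit stays cleared. */
```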
diff --git a/tests/arch/riscv/pmp/mem-attr-entries/memattr_mapping.overlay b/tests/arch/riscv/pmp/mem-attr-entries/memattr_mapping.overlay new file mode 100644 index 0000000000000..38be6b644efed --- /dev/null +++ b/tests/arch/riscv/pmp/mem-attr-entries/memattr_mapping.overlay @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2025 Google LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h> + +/ { + memattr_region1: memattr_region1@80000000 { + compatible = "zephyr,memory-region"; + reg = <0x80000000 0x20000>; + zephyr,memory-region = "MEMATTR_REGION1"; + zephyr,memory-attr = <(DT_MEM_RISCV_TYPE_IO_R | + DT_MEM_RISCV_TYPE_IO_W | + DT_MEM_RISCV_TYPE_IO_X)>; + }; + + memattr_region2: memattr_region2@80020000 { + compatible = "zephyr,memory-region"; + reg = <0x80020000 0x30000>; + zephyr,memory-region = "MEMATTR_REGION2"; + zephyr,memory-attr = <(DT_MEM_RISCV_TYPE_IO_R | + DT_MEM_RISCV_TYPE_IO_W | + DT_MEM_RISCV_TYPE_IO_X)>; + }; +}; diff --git a/tests/arch/riscv/pmp/mem-attr-entries/prj.conf b/tests/arch/riscv/pmp/mem-attr-entries/prj.conf new file mode 100644 index 0000000000000..6c71044ec5143 --- /dev/null +++ b/tests/arch/riscv/pmp/mem-attr-entries/prj.conf @@ -0,0 +1,2 @@ +CONFIG_ZTEST=y +CONFIG_MEM_ATTR=y diff --git a/tests/arch/riscv/pmp/mem-attr-entries/src/main.c b/tests/arch/riscv/pmp/mem-attr-entries/src/main.c new file mode 100644 index 0000000000000..5618e65d06f51 --- /dev/null +++ b/tests/arch/riscv/pmp/mem-attr-entries/src/main.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2025 Google LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include <zephyr/ztest.h> +#include <zephyr/arch/riscv/csr.h> +#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h> +#include <pmp.h> + +/* Checks if the Modify Privilege (MPRV) bit in mstatus is 1 (enabled). */ +static bool riscv_mprv_is_enabled(void) +{ + return csr_read(mstatus) & MSTATUS_MPRV; +} + +/* Checks if the Machine Previous Privilege (MPP) field in mstatus is set to M-Mode (0b11). */ +static bool riscv_mpp_is_m_mode(void) +{ + return (csr_read(mstatus) & MSTATUS_MPP) == MSTATUS_MPP; +} + +/* Helper structure to define the expected PMP regions derived from the Device Tree. */ +struct expected_region { + uintptr_t base; + size_t size; + uint8_t perm; + bool found; +};
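Given the overlay above, the devicetree accessors used to fill dt_regions in the test below resolve as follows for region 1 (a focused sketch of what the suite asserts, not an extra test case):

```c
zassert_equal(DT_REG_ADDR(DT_NODELABEL(memattr_region1)), 0x80000000);
zassert_equal(DT_REG_SIZE(DT_NODELABEL(memattr_region1)), 0x20000);
zassert_equal(DT_MEM_RISCV_TO_PMP_PERM(DT_PROP(DT_NODELABEL(memattr_region1),
					       zephyr_memory_attr)),
	      PMP_R | PMP_W | PMP_X);
```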
+/* + * Extract base address, size, and permission for the memory regions + * defined in the Device Tree under the 'memattr' nodes. + */ +static struct expected_region dt_regions[] = { + {.base = DT_REG_ADDR(DT_NODELABEL(memattr_region1)), + .size = DT_REG_SIZE(DT_NODELABEL(memattr_region1)), + .perm = DT_MEM_RISCV_TO_PMP_PERM( + DT_PROP(DT_NODELABEL(memattr_region1), zephyr_memory_attr)), + .found = false}, + {.base = DT_REG_ADDR(DT_NODELABEL(memattr_region2)), + .size = DT_REG_SIZE(DT_NODELABEL(memattr_region2)), + .perm = DT_MEM_RISCV_TO_PMP_PERM( + DT_PROP(DT_NODELABEL(memattr_region2), zephyr_memory_attr)), + .found = false}}; + +ZTEST(riscv_pmp_memattr_entries, test_pmp_devicetree_memattr_config) +{ + const size_t num_pmpcfg_regs = CONFIG_PMP_SLOTS / sizeof(unsigned long); + const size_t num_pmpaddr_regs = CONFIG_PMP_SLOTS; + + unsigned long current_pmpcfg_regs[num_pmpcfg_regs]; + unsigned long current_pmpaddr_regs[num_pmpaddr_regs]; + + /* Read the current PMP configuration from the control registers */ + z_riscv_pmp_read_config(current_pmpcfg_regs, num_pmpcfg_regs); + z_riscv_pmp_read_addr(current_pmpaddr_regs, num_pmpaddr_regs); + + const uint8_t *const current_pmp_cfg_entries = (const uint8_t *)current_pmpcfg_regs; + + for (unsigned int index = 0; index < CONFIG_PMP_SLOTS; ++index) { + unsigned long start, end; + uint8_t cfg_byte = current_pmp_cfg_entries[index]; + + /* Decode the configured PMP region (start and end addresses) */ + pmp_decode_region(cfg_byte, current_pmpaddr_regs, index, &start, &end); + + /* Compare the decoded region against the list of expected DT regions */ + for (size_t i = 0; i < ARRAY_SIZE(dt_regions); ++i) { + if ((start == dt_regions[i].base) && + (end == dt_regions[i].base + dt_regions[i].size - 1) && + ((cfg_byte & 0x07) == dt_regions[i].perm)) { + + dt_regions[i].found = true; + break; + } + } + } + + for (size_t i = 0; i < ARRAY_SIZE(dt_regions); i++) { + zassert_true(dt_regions[i].found, + "PMP entry for DT region %zu (base 0x%lx, size 0x%zx, perm 0x%x) not " + "found.", + i + 1, dt_regions[i].base, dt_regions[i].size, dt_regions[i].perm); + } +} + +ZTEST(riscv_pmp_memattr_entries, test_riscv_mprv_mpp_config) +{ + zassert_true(riscv_mprv_is_enabled(), + "MPRV should be enabled (1) to use the privilege specified by the MPP field."); + + zassert_false(riscv_mpp_is_m_mode(), + "MPP should be set to 0x00 (U-Mode) before execution."); +} + +ZTEST(riscv_pmp_memattr_entries, test_dt_pmp_perm_conversion) +{ + uint8_t result; + + result = DT_MEM_RISCV_TO_PMP_PERM(0); + zassert_equal(result, 0, "Expected 0, got 0x%x", result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_EMPTY); + zassert_equal(result, 0, "Expected 0, got 0x%x", result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_R); + zassert_equal(result, PMP_R, "Expected PMP_R (0x%x), got 0x%x", PMP_R, result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_W); + zassert_equal(result, PMP_W, "Expected PMP_W (0x%x), got 0x%x", PMP_W, result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_X); + zassert_equal(result, PMP_X, "Expected PMP_X (0x%x), got 0x%x", PMP_X, result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_R | DT_MEM_RISCV_TYPE_IO_W); + zassert_equal(result, PMP_R | PMP_W, "Expected R|W (0x%x), got 0x%x", PMP_R | PMP_W, + result); + + result = DT_MEM_RISCV_TO_PMP_PERM(DT_MEM_RISCV_TYPE_IO_R | DT_MEM_RISCV_TYPE_IO_W | + DT_MEM_RISCV_TYPE_IO_X); + zassert_equal(result, PMP_R | PMP_W | PMP_X, "Expected R|W|X (0x%x), got 0x%x", + PMP_R | PMP_W | PMP_X, result); +} + +ZTEST_SUITE(riscv_pmp_memattr_entries, NULL, NULL, NULL, NULL, NULL);
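A note on the uint8_t cast used by test_pmp_devicetree_memattr_config() above: each pmpcfg CSR packs xlen/8 one-byte entries, so on little-endian RISC-V entry n is simply byte n of the shadow array (sketch):

```c
static inline uint8_t pmp_cfg_byte(const unsigned long *pmpcfg_regs,
				   unsigned int n)
{
	/* Entry n lives in byte (n % PMPCFG_STRIDE) of register
	 * pmpcfg_regs[n / PMPCFG_STRIDE]; flat byte indexing gives the
	 * same result on a little-endian target. */
	return ((const uint8_t *)pmpcfg_regs)[n];
}
```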
diff --git a/tests/arch/riscv/pmp/mem-attr-entries/testcase.yaml b/tests/arch/riscv/pmp/mem-attr-entries/testcase.yaml new file mode 100644 index 0000000000000..8105213cfd990 --- /dev/null +++ b/tests/arch/riscv/pmp/mem-attr-entries/testcase.yaml @@ -0,0 +1,11 @@ +common: + platform_allow: + - qemu_riscv32 + - qemu_riscv32e + - qemu_riscv64 + filter: CONFIG_RISCV_PMP + +tests: + arch.riscv.pmp.memattr.entries: + extra_args: + DTC_OVERLAY_FILE="memattr_mapping.overlay"