Skip to content

Commit 57f4959

Browse files
James Morse
authored and Catalin Marinas committed
arm64: kernel: Add support for User Access Override
'User Access Override' is a new ARMv8.2 feature which allows the unprivileged load and store instructions to be overridden to behave in the normal way. This patch converts {get,put}_user() and friends to use ldtr*/sttr* instructions - so that they can only access EL0 memory, then enables UAO when fs==KERNEL_DS so that these functions can access kernel memory. This allows user space's read/write permissions to be checked against the page tables, instead of testing addr<USER_DS, then using the kernel's read/write permissions. Signed-off-by: James Morse <[email protected]> [[email protected]: move uao_thread_switch() above dsb()] Signed-off-by: Catalin Marinas <[email protected]>
1 parent 406e308 commit 57f4959

File tree

15 files changed

+213
-39
lines changed

15 files changed

+213
-39
lines changed

arch/arm64/Kconfig

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -756,6 +756,27 @@ config ARM64_LSE_ATOMICS
756756

757757
endmenu
758758

759+
config ARM64_UAO
760+
bool "Enable support for User Access Override (UAO)"
761+
default y
762+
help
763+
User Access Override (UAO; part of the ARMv8.2 Extensions)
764+
causes the 'unprivileged' variant of the load/store instructions to
765+
be overridden to be privileged.
766+
767+
This option changes get_user() and friends to use the 'unprivileged'
768+
variant of the load/store instructions. This ensures that user-space
769+
really did have access to the supplied memory. When addr_limit is
770+
set to kernel memory the UAO bit will be set, allowing privileged
771+
access to kernel memory.
772+
773+
Choosing this option will cause copy_to_user() et al to use user-space
774+
memory permissions.
775+
776+
The feature is detected at runtime, the kernel will use the
777+
regular load/store instructions if the cpu does not implement the
778+
feature.
779+
759780
endmenu
760781

761782
menu "Boot options"

arch/arm64/include/asm/alternative.h

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
#ifndef __ASM_ALTERNATIVE_H
22
#define __ASM_ALTERNATIVE_H
33

4+
#include <asm/cpufeature.h>
5+
46
#ifndef __ASSEMBLY__
57

68
#include <linux/init.h>
@@ -63,6 +65,8 @@ void apply_alternatives(void *start, size_t length);
6365

6466
#else
6567

68+
#include <asm/assembler.h>
69+
6670
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
6771
.word \orig_offset - .
6872
.word \alt_offset - .
@@ -136,6 +140,74 @@ void apply_alternatives(void *start, size_t length);
136140
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
137141

138142

143+
/*
144+
* Generate the assembly for UAO alternatives with exception table entries.
145+
* This is complicated as there is no post-increment or pair versions of the
146+
* unprivileged instructions, and USER() only works for single instructions.
147+
*/
148+
#ifdef CONFIG_ARM64_UAO
149+
.macro uao_ldp l, reg1, reg2, addr, post_inc
150+
alternative_if_not ARM64_HAS_UAO
151+
8888: ldp \reg1, \reg2, [\addr], \post_inc;
152+
8889: nop;
153+
nop;
154+
alternative_else
155+
ldtr \reg1, [\addr];
156+
ldtr \reg2, [\addr, #8];
157+
add \addr, \addr, \post_inc;
158+
alternative_endif
159+
160+
.section __ex_table,"a";
161+
.align 3;
162+
.quad 8888b,\l;
163+
.quad 8889b,\l;
164+
.previous;
165+
.endm
166+
167+
.macro uao_stp l, reg1, reg2, addr, post_inc
168+
alternative_if_not ARM64_HAS_UAO
169+
8888: stp \reg1, \reg2, [\addr], \post_inc;
170+
8889: nop;
171+
nop;
172+
alternative_else
173+
sttr \reg1, [\addr];
174+
sttr \reg2, [\addr, #8];
175+
add \addr, \addr, \post_inc;
176+
alternative_endif
177+
178+
.section __ex_table,"a";
179+
.align 3;
180+
.quad 8888b,\l;
181+
.quad 8889b,\l;
182+
.previous
183+
.endm
184+
185+
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
186+
alternative_if_not ARM64_HAS_UAO
187+
8888: \inst \reg, [\addr], \post_inc;
188+
nop;
189+
alternative_else
190+
\alt_inst \reg, [\addr];
191+
add \addr, \addr, \post_inc;
192+
alternative_endif
193+
194+
.section __ex_table,"a";
195+
.align 3;
196+
.quad 8888b,\l;
197+
.previous
198+
.endm
199+
#else
200+
.macro uao_ldp l, reg1, reg2, addr, post_inc
201+
USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
202+
.endm
203+
.macro uao_stp l, reg1, reg2, addr, post_inc
204+
USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
205+
.endm
206+
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
207+
USER(\l, \inst \reg, [\addr], \post_inc)
208+
.endm
209+
#endif
210+
139211
#endif /* __ASSEMBLY__ */
140212

141213
/*

arch/arm64/include/asm/cpufeature.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,9 @@
3131
#define ARM64_WORKAROUND_CAVIUM_23154 6
3232
#define ARM64_WORKAROUND_834220 7
3333
#define ARM64_HAS_NO_HW_PREFETCH 8
34+
#define ARM64_HAS_UAO 9
3435

35-
#define ARM64_NCAPS 9
36+
#define ARM64_NCAPS 10
3637

3738
#ifndef __ASSEMBLY__
3839

arch/arm64/include/asm/processor.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,5 +191,6 @@ static inline void spin_lock_prefetch(const void *ptr)
191191
#endif
192192

193193
void cpu_enable_pan(void *__unused);
194+
void cpu_enable_uao(void *__unused);
194195

195196
#endif /* __ASM_PROCESSOR_H */

arch/arm64/include/asm/sysreg.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,9 +79,12 @@
7979
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
8080

8181
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
82+
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
8283

8384
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
8485
(!!x)<<8 | 0x1f)
86+
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
87+
(!!x)<<8 | 0x1f)
8588

8689
/* SCTLR_EL1 */
8790
#define SCTLR_EL1_CP15BEN (0x1 << 5)

arch/arm64/include/asm/thread_info.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,12 @@ static inline struct thread_info *current_thread_info(void)
8585
return (struct thread_info *)sp_el0;
8686
}
8787

88+
/* Access struct thread_info of another thread */
89+
static inline struct thread_info *get_thread_info(unsigned long thread_stack)
90+
{
91+
return (struct thread_info *)(thread_stack & ~(THREAD_SIZE - 1));
92+
}
93+
8894
#define thread_saved_pc(tsk) \
8995
((unsigned long)(tsk->thread.cpu_context.pc))
9096
#define thread_saved_sp(tsk) \

arch/arm64/include/asm/uaccess.h

Lines changed: 32 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,16 @@ extern int fixup_exception(struct pt_regs *regs);
6464
static inline void set_fs(mm_segment_t fs)
6565
{
6666
current_thread_info()->addr_limit = fs;
67+
68+
/*
69+
* Enable/disable UAO so that copy_to_user() etc can access
70+
* kernel memory with the unprivileged instructions.
71+
*/
72+
if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
73+
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
74+
else
75+
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
76+
CONFIG_ARM64_UAO));
6777
}
6878

6979
#define segment_eq(a, b) ((a) == (b))
@@ -113,9 +123,10 @@ static inline void set_fs(mm_segment_t fs)
113123
* The "__xxx_error" versions set the third argument to -EFAULT if an error
114124
* occurs, and leave it unchanged on success.
115125
*/
116-
#define __get_user_asm(instr, reg, x, addr, err) \
126+
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
117127
asm volatile( \
118-
"1: " instr " " reg "1, [%2]\n" \
128+
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
129+
alt_instr " " reg "1, [%2]\n", feature) \
119130
"2:\n" \
120131
" .section .fixup, \"ax\"\n" \
121132
" .align 2\n" \
@@ -138,16 +149,20 @@ do { \
138149
CONFIG_ARM64_PAN)); \
139150
switch (sizeof(*(ptr))) { \
140151
case 1: \
141-
__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
152+
__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
153+
(err), ARM64_HAS_UAO); \
142154
break; \
143155
case 2: \
144-
__get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \
156+
__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
157+
(err), ARM64_HAS_UAO); \
145158
break; \
146159
case 4: \
147-
__get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \
160+
__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
161+
(err), ARM64_HAS_UAO); \
148162
break; \
149163
case 8: \
150-
__get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \
164+
__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
165+
(err), ARM64_HAS_UAO); \
151166
break; \
152167
default: \
153168
BUILD_BUG(); \
@@ -181,9 +196,10 @@ do { \
181196
((x) = 0, -EFAULT); \
182197
})
183198

184-
#define __put_user_asm(instr, reg, x, addr, err) \
199+
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
185200
asm volatile( \
186-
"1: " instr " " reg "1, [%2]\n" \
201+
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
202+
alt_instr " " reg "1, [%2]\n", feature) \
187203
"2:\n" \
188204
" .section .fixup,\"ax\"\n" \
189205
" .align 2\n" \
@@ -205,16 +221,20 @@ do { \
205221
CONFIG_ARM64_PAN)); \
206222
switch (sizeof(*(ptr))) { \
207223
case 1: \
208-
__put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
224+
__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
225+
(err), ARM64_HAS_UAO); \
209226
break; \
210227
case 2: \
211-
__put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \
228+
__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
229+
(err), ARM64_HAS_UAO); \
212230
break; \
213231
case 4: \
214-
__put_user_asm("str", "%w", __pu_val, (ptr), (err)); \
232+
__put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
233+
(err), ARM64_HAS_UAO); \
215234
break; \
216235
case 8: \
217-
__put_user_asm("str", "%", __pu_val, (ptr), (err)); \
236+
__put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
237+
(err), ARM64_HAS_UAO); \
218238
break; \
219239
default: \
220240
BUILD_BUG(); \

arch/arm64/include/uapi/asm/ptrace.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
#define PSR_A_BIT 0x00000100
4646
#define PSR_D_BIT 0x00000200
4747
#define PSR_PAN_BIT 0x00400000
48+
#define PSR_UAO_BIT 0x00800000
4849
#define PSR_Q_BIT 0x08000000
4950
#define PSR_V_BIT 0x10000000
5051
#define PSR_C_BIT 0x20000000

arch/arm64/kernel/cpufeature.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -677,6 +677,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
677677
.capability = ARM64_HAS_NO_HW_PREFETCH,
678678
.matches = has_no_hw_prefetch,
679679
},
680+
#ifdef CONFIG_ARM64_UAO
681+
{
682+
.desc = "User Access Override",
683+
.capability = ARM64_HAS_UAO,
684+
.matches = has_cpuid_feature,
685+
.sys_reg = SYS_ID_AA64MMFR2_EL1,
686+
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
687+
.min_field_value = 1,
688+
.enable = cpu_enable_uao,
689+
},
690+
#endif /* CONFIG_ARM64_UAO */
680691
{},
681692
};
682693

arch/arm64/kernel/process.c

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
#include <linux/notifier.h>
4747
#include <trace/events/power.h>
4848

49+
#include <asm/alternative.h>
4950
#include <asm/compat.h>
5051
#include <asm/cacheflush.h>
5152
#include <asm/fpsimd.h>
@@ -280,6 +281,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
280281
} else {
281282
memset(childregs, 0, sizeof(struct pt_regs));
282283
childregs->pstate = PSR_MODE_EL1h;
284+
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
285+
cpus_have_cap(ARM64_HAS_UAO))
286+
childregs->pstate |= PSR_UAO_BIT;
283287
p->thread.cpu_context.x19 = stack_start;
284288
p->thread.cpu_context.x20 = stk_sz;
285289
}
@@ -308,6 +312,20 @@ static void tls_thread_switch(struct task_struct *next)
308312
: : "r" (tpidr), "r" (tpidrro));
309313
}
310314

315+
/* Restore the UAO state depending on next's addr_limit */
316+
static void uao_thread_switch(struct task_struct *next)
317+
{
318+
unsigned long next_sp = next->thread.cpu_context.sp;
319+
320+
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
321+
get_thread_info(next_sp)->addr_limit == KERNEL_DS)
322+
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO,
323+
CONFIG_ARM64_UAO));
324+
else
325+
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
326+
CONFIG_ARM64_UAO));
327+
}
328+
311329
/*
312330
* Thread switching.
313331
*/
@@ -320,6 +338,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
320338
tls_thread_switch(next);
321339
hw_breakpoint_thread_switch(next);
322340
contextidr_thread_switch(next);
341+
uao_thread_switch(next);
323342

324343
/*
325344
* Complete any pending TLB or cache maintenance on this CPU in case

0 commit comments

Comments
 (0)