Skip to content

Commit a5e090a

Browse files
author
Russell King
committed
ARM: software-based privileged-no-access support
Provide a software-based implementation of the privileged no-access support found in ARMv8.1. Userspace pages are mapped using a different domain number from the kernel and IO mappings. If we switch the user domain to "no access" when we enter the kernel, we can prevent the kernel from touching userspace. However, the kernel needs to be able to access userspace via the various user accessor functions. With the wrapping in the previous patch, we can temporarily enable access when the kernel needs user access, and re-disable it afterwards. This allows us to trap non-intended accesses to userspace, e.g., those caused by an inadvertent dereference of the LIST_POISON* values, which, with appropriate user mappings set up, can be made to succeed. This in turn can allow use-after-free bugs to be further exploited than would otherwise be possible. Signed-off-by: Russell King <[email protected]>
1 parent 2190fed commit a5e090a

File tree

7 files changed

+125
-8
lines changed

7 files changed

+125
-8
lines changed

arch/arm/Kconfig

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1694,6 +1694,21 @@ config HIGHPTE
16941694
bool "Allocate 2nd-level pagetables from highmem"
16951695
depends on HIGHMEM
16961696

1697+
config CPU_SW_DOMAIN_PAN
1698+
bool "Enable use of CPU domains to implement privileged no-access"
1699+
depends on MMU && !ARM_LPAE
1700+
default y
1701+
help
1702+
Increase kernel security by ensuring that normal kernel accesses
1703+
are unable to access userspace addresses. This can help prevent
1704+
use-after-free bugs becoming an exploitable privilege escalation
1705+
by ensuring that magic values (such as LIST_POISON) will always
1706+
fault when dereferenced.
1707+
1708+
CPUs with low-vector mappings use a best-efforts implementation.
1709+
Their lower 1MB needs to remain accessible for the vectors, but
1710+
the remainder of userspace will become appropriately inaccessible.
1711+
16971712
config HW_PERF_EVENTS
16981713
bool "Enable hardware performance counter support for perf events"
16991714
depends on PERF_EVENTS

arch/arm/include/asm/assembler.h

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -446,15 +446,45 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
446446
.endm
447447

448448
.macro uaccess_disable, tmp, isb=1
449+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
450+
/*
451+
* Whenever we re-enter userspace, the domains should always be
452+
* set appropriately.
453+
*/
454+
mov \tmp, #DACR_UACCESS_DISABLE
455+
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
456+
.if \isb
457+
instr_sync
458+
.endif
459+
#endif
449460
.endm
450461

451462
.macro uaccess_enable, tmp, isb=1
463+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
464+
/*
465+
* Whenever we re-enter userspace, the domains should always be
466+
* set appropriately.
467+
*/
468+
mov \tmp, #DACR_UACCESS_ENABLE
469+
mcr p15, 0, \tmp, c3, c0, 0
470+
.if \isb
471+
instr_sync
472+
.endif
473+
#endif
452474
.endm
453475

454476
.macro uaccess_save, tmp
477+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
478+
mrc p15, 0, \tmp, c3, c0, 0
479+
str \tmp, [sp, #S_FRAME_SIZE]
480+
#endif
455481
.endm
456482

457483
.macro uaccess_restore
484+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
485+
ldr r0, [sp, #S_FRAME_SIZE]
486+
mcr p15, 0, r0, c3, c0, 0
487+
#endif
458488
.endm
459489

460490
.macro uaccess_save_and_disable, tmp

arch/arm/include/asm/domain.h

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,29 @@
5757
#define domain_mask(dom) ((3) << (2 * (dom)))
5858
#define domain_val(dom,type) ((type) << (2 * (dom)))
5959

60+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
61+
#define DACR_INIT \
62+
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
63+
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
64+
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
65+
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
66+
#else
6067
#define DACR_INIT \
6168
(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
6269
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
6370
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
6471
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
72+
#endif
73+
74+
#define __DACR_DEFAULT \
75+
domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
76+
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
77+
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
78+
79+
#define DACR_UACCESS_DISABLE \
80+
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
81+
#define DACR_UACCESS_ENABLE \
82+
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
6583

6684
#ifndef __ASSEMBLY__
6785

@@ -76,7 +94,6 @@ static inline unsigned int get_domain(void)
7694
return domain;
7795
}
7896

79-
#ifdef CONFIG_CPU_USE_DOMAINS
8097
static inline void set_domain(unsigned val)
8198
{
8299
asm volatile(
@@ -85,6 +102,7 @@ static inline void set_domain(unsigned val)
85102
isb();
86103
}
87104

105+
#ifdef CONFIG_CPU_USE_DOMAINS
88106
#define modify_domain(dom,type) \
89107
do { \
90108
unsigned int domain = get_domain(); \
@@ -94,7 +112,6 @@ static inline void set_domain(unsigned val)
94112
} while (0)
95113

96114
#else
97-
static inline void set_domain(unsigned val) { }
98115
static inline void modify_domain(unsigned dom, unsigned type) { }
99116
#endif
100117

arch/arm/include/asm/uaccess.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs);
5757
*/
5858
static inline unsigned int uaccess_save_and_enable(void)
5959
{
60+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
61+
unsigned int old_domain = get_domain();
62+
63+
/* Set the current domain access to permit user accesses */
64+
set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
65+
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
66+
67+
return old_domain;
68+
#else
6069
return 0;
70+
#endif
6171
}
6272

6373
static inline void uaccess_restore(unsigned int flags)
6474
{
75+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
76+
/* Restore the user access mask */
77+
set_domain(flags);
78+
#endif
6579
}
6680

6781
/*

arch/arm/kernel/process.c

Lines changed: 30 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
129129
buf[4] = '\0';
130130

131131
#ifndef CONFIG_CPU_V7M
132-
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
133-
buf, interrupts_enabled(regs) ? "n" : "ff",
134-
fast_interrupts_enabled(regs) ? "n" : "ff",
135-
processor_modes[processor_mode(regs)],
136-
isa_modes[isa_mode(regs)],
137-
get_fs() == get_ds() ? "kernel" : "user");
132+
{
133+
unsigned int domain = get_domain();
134+
const char *segment;
135+
136+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
137+
/*
138+
* Get the domain register for the parent context. In user
139+
* mode, we don't save the DACR, so lets use what it should
140+
* be. For other modes, we place it after the pt_regs struct.
141+
*/
142+
if (user_mode(regs))
143+
domain = DACR_UACCESS_ENABLE;
144+
else
145+
domain = *(unsigned int *)(regs + 1);
146+
#endif
147+
148+
if ((domain & domain_mask(DOMAIN_USER)) ==
149+
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
150+
segment = "none";
151+
else if (get_fs() == get_ds())
152+
segment = "kernel";
153+
else
154+
segment = "user";
155+
156+
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
157+
buf, interrupts_enabled(regs) ? "n" : "ff",
158+
fast_interrupts_enabled(regs) ? "n" : "ff",
159+
processor_modes[processor_mode(regs)],
160+
isa_modes[isa_mode(regs)], segment);
161+
}
138162
#else
139163
printk("xPSR: %08lx\n", regs->ARM_cpsr);
140164
#endif

arch/arm/kernel/swp_emulate.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
141141

142142
while (1) {
143143
unsigned long temp;
144+
unsigned int __ua_flags;
144145

146+
__ua_flags = uaccess_save_and_enable();
145147
if (type == TYPE_SWPB)
146148
__user_swpb_asm(*data, address, res, temp);
147149
else
148150
__user_swp_asm(*data, address, res, temp);
151+
uaccess_restore(__ua_flags);
149152

150153
if (likely(res != -EAGAIN) || signal_pending(current))
151154
break;

arch/arm/lib/csumpartialcopyuser.S

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,27 @@
1717

1818
.text
1919

20+
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
21+
.macro save_regs
22+
mrc p15, 0, ip, c3, c0, 0
23+
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
24+
uaccess_enable ip
25+
.endm
26+
27+
.macro load_regs
28+
ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
29+
mcr p15, 0, ip, c3, c0, 0
30+
ret lr
31+
.endm
32+
#else
2033
.macro save_regs
2134
stmfd sp!, {r1, r2, r4 - r8, lr}
2235
.endm
2336

2437
.macro load_regs
2538
ldmfd sp!, {r1, r2, r4 - r8, pc}
2639
.endm
40+
#endif
2741

2842
.macro load1b, reg1
2943
ldrusr \reg1, r0, 1

0 commit comments

Comments
 (0)