Commit a13644f

joergroedel authored and suryasaimadhu committed
x86/entry/64: Add entry code for #VC handler
The #VC handler needs special entry code because:

1. It runs on an IST stack

2. It needs to be able to handle nested #VC exceptions

To make this work, the entry code is implemented to pretend it doesn't use an IST stack. When entered from user-mode or early SYSCALL entry path it switches to the task stack. If entered from kernel-mode it tries to switch back to the previous stack in the IRET frame.

The stack found in the IRET frame is validated first, and if it is not safe to use it for the #VC handler, the code will switch to a fall-back stack (the #VC2 IST stack). From there, it can cause nested exceptions again.

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 6b27edd commit a13644f
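As a reading aid for the diff below, the stack-selection policy described in the commit message can be condensed into a small C sketch. Everything in it (function and parameter names) is illustrative only; the real decision is split between the new idtentry_vc assembly stub and vc_switch_off_ist() added by this commit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the decision made by the #VC entry path. */
static uintptr_t vc_pick_stack(bool from_user, bool in_early_syscall,
			       bool interrupted_stack_is_safe,
			       uintptr_t task_stack_top,
			       uintptr_t interrupted_sp,
			       uintptr_t vc2_ist_top)
{
	if (from_user || in_early_syscall)
		return task_stack_top;		/* treat it like a normal entry */
	if (interrupted_stack_is_safe)
		return interrupted_sp;		/* go back to the interrupted stack */
	return vc2_ist_top;			/* #VC2 fall-back IST stack */
}

int main(void)
{
	/* Example: #VC hit kernel code that was running on an unknown stack. */
	uintptr_t sp = vc_pick_stack(false, false, false,
				     0x1000, 0x2000, 0x3000);
	printf("selected stack top: %#lx\n", (unsigned long)sp);
	return 0;
}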

File tree: 5 files changed, +171 −0 lines changed

arch/x86/entry/entry_64.S

Lines changed: 80 additions & 0 deletions
@@ -101,6 +101,8 @@ SYM_CODE_START(entry_SYSCALL_64)
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
+SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
+
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER_DS				/* pt_regs->ss */
 	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
@@ -446,6 +448,84 @@ _ASM_NOKPROBE(\asmsym)
 SYM_CODE_END(\asmsym)
 .endm
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/**
+ * idtentry_vc - Macro to generate entry stub for #VC
+ * @vector:	Vector number
+ * @asmsym:	ASM symbol for the entry point
+ * @cfunc:	C function to be called
+ *
+ * The macro emits code to set up the kernel context for #VC. The #VC handler
+ * runs on an IST stack and needs to be able to cause nested #VC exceptions.
+ *
+ * To make this work the #VC entry code tries its best to pretend it doesn't use
+ * an IST stack by switching to the task stack if coming from user-space (which
+ * includes early SYSCALL entry path) or back to the stack in the IRET frame if
+ * entered from kernel-mode.
+ *
+ * If entered from kernel-mode the return stack is validated first, and if it is
+ * not safe to use (e.g. because it points to the entry stack) the #VC handler
+ * will switch to a fall-back stack (VC2) and call a special handler function.
+ *
+ * The macro is only used for one vector, but it is planned to be extended in
+ * the future for the #HV exception.
+ */
+.macro idtentry_vc vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+	UNWIND_HINT_IRET_REGS
+	ASM_CLAC
+
+	/*
+	 * If the entry is from userspace, switch stacks and treat it as
+	 * a normal entry.
+	 */
+	testb	$3, CS-ORIG_RAX(%rsp)
+	jnz	.Lfrom_usermode_switch_stack_\@
+
+	/*
+	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+	 */
+	call	paranoid_entry
+
+	UNWIND_HINT_REGS
+
+	/*
+	 * Switch off the IST stack to make it free for nested exceptions. The
+	 * vc_switch_off_ist() function will switch back to the interrupted
+	 * stack if it is safe to do so. If not it switches to the VC fall-back
+	 * stack.
+	 */
+	movq	%rsp, %rdi		/* pt_regs pointer */
+	call	vc_switch_off_ist
+	movq	%rax, %rsp		/* Switch to new stack */
+
+	UNWIND_HINT_REGS
+
+	/* Update pt_regs */
+	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
+	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
+
+	movq	%rsp, %rdi		/* pt_regs pointer */
+
+	call	\cfunc
+
+	/*
+	 * No need to switch back to the IST stack. The current stack is either
+	 * identical to the stack in the IRET frame or the VC fall-back stack,
+	 * so it is definitely mapped even with PTI enabled.
+	 */
+	jmp	paranoid_exit
+
+	/* Switch to the regular task stack */
+.Lfrom_usermode_switch_stack_\@:
+	idtentry_body safe_stack_\cfunc, has_error_code=1
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
+#endif
+
 /*
  * Double fault entry. Straight paranoid. No checks from which context
  * this comes because for the espfix induced #DF this would do the wrong

arch/x86/include/asm/idtentry.h

Lines changed: 44 additions & 0 deletions
@@ -308,6 +308,18 @@ static __always_inline void __##func(struct pt_regs *regs)
 	DECLARE_IDTENTRY_RAW(vector, func);				\
 	__visible void noist_##func(struct pt_regs *regs)
 
+/**
+ * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point
+ * @vector:	Vector number (ignored for C)
+ * @func:	Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE, but declares also the
+ * safe_stack C handler.
+ */
+#define DECLARE_IDTENTRY_VC(vector, func)				\
+	DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func);			\
+	__visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+
 /**
  * DEFINE_IDTENTRY_IST - Emit code for IST entry points
  * @func:	Function name of the entry point
@@ -347,6 +359,35 @@ static __always_inline void __##func(struct pt_regs *regs)
 #define DEFINE_IDTENTRY_DF(func)					\
 	DEFINE_IDTENTRY_RAW_ERRORCODE(func)
 
+/**
+ * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
+				   which runs on a safe stack.
+ * @func:	Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC_SAFE_STACK(func)				\
+	DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+
+/**
+ * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
+			    which runs on the VC fall-back stack
+ * @func:	Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC_IST(func)					\
+	DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
+
+/**
+ * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
+ * @func:	Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC(func)					\
+	DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+
 #else /* CONFIG_X86_64 */
 
 /**
@@ -433,6 +474,9 @@ __visible noinstr void func(struct pt_regs *regs,			\
 # define DECLARE_IDTENTRY_XENCB(vector, func)				\
	DECLARE_IDTENTRY(vector, func)
 
+# define DECLARE_IDTENTRY_VC(vector, func)				\
+	idtentry_vc vector asm_##func func
+
 #else
 # define DECLARE_IDTENTRY_MCE(vector, func)				\
	DECLARE_IDTENTRY(vector, func)
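To see how these pieces are meant to fit together, here is a hypothetical usage sketch. The vector and handler names (X86_TRAP_VC, exc_vmm_communication) are assumptions about a later patch in the series, not part of this commit; the comments only restate what the macro documentation above says.

/* Hypothetical usage sketch -- names are assumptions, not part of this commit */

/* In a header: declares asm_..., safe_stack_... and the plain C handler */
DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);

/* Handler variant which runs on a safe stack */
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
	/* ... handle #VC entered from user-space ... */
}

/* Handler variant which runs on the VC2 fall-back IST stack */
DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
{
	/* ... handle #VC raised from an unsafe context ... */
}

/* Plain handler, called by the assembly stub after vc_switch_off_ist() */
DEFINE_IDTENTRY_VC(exc_vmm_communication)
{
	/* ... handle #VC entered from kernel-mode ... */
}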

arch/x86/include/asm/proto.h

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ void syscall_init(void);
 
 #ifdef CONFIG_X86_64
 void entry_SYSCALL_64(void);
+void entry_SYSCALL_64_safe_stack(void);
 long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
 #endif
 
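The new prototype exists so that C code can test whether an instruction pointer falls inside the window between entry_SYSCALL_64 and the new inner label; inside that window RSP still holds the user-supplied value (see vc_switch_off_ist() further down). Below is a minimal userspace illustration of that half-open address-window test, using a byte array as a stand-in for the code region; all names here are made up.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the bytes between entry_SYSCALL_64 and
 * entry_SYSCALL_64_safe_stack; in the kernel these are linker symbols. */
static const unsigned char early_syscall_window[32];

static int ip_in_early_syscall_window(uintptr_t ip)
{
	return ip >= (uintptr_t)&early_syscall_window[0] &&
	       ip <  (uintptr_t)&early_syscall_window[32];
}

int main(void)
{
	uintptr_t inside  = (uintptr_t)&early_syscall_window[8];
	uintptr_t outside = (uintptr_t)&early_syscall_window[32];

	printf("inside: %d, outside: %d\n",
	       ip_in_early_syscall_window(inside),
	       ip_in_early_syscall_window(outside));
	return 0;
}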

arch/x86/include/asm/traps.h

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
 asmlinkage __visible notrace
 struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
 void __init trap_init(void);
+asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs);
 #endif
 
 #ifdef CONFIG_X86_F00F_BUG

arch/x86/kernel/traps.c

Lines changed: 45 additions & 0 deletions
@@ -43,6 +43,7 @@
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
 #include <asm/debugreg.h>
+#include <asm/realmode.h>
 #include <asm/text-patching.h>
 #include <asm/ftrace.h>
 #include <asm/traps.h>
@@ -673,6 +674,50 @@ asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
 	return regs;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
+{
+	unsigned long sp, *stack;
+	struct stack_info info;
+	struct pt_regs *regs_ret;
+
+	/*
+	 * In the SYSCALL entry path the RSP value comes from user-space - don't
+	 * trust it and switch to the current kernel stack
+	 */
+	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+	    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack) {
+		sp = this_cpu_read(cpu_current_top_of_stack);
+		goto sync;
+	}
+
+	/*
+	 * From here on the RSP value is trusted. Now check whether entry
+	 * happened from a safe stack. Not safe are the entry or unknown stacks,
+	 * use the fall-back stack instead in this case.
+	 */
+	sp    = regs->sp;
+	stack = (unsigned long *)sp;
+
+	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
+	    info.type >= STACK_TYPE_EXCEPTION_LAST)
+		sp = __this_cpu_ist_top_va(VC2);
+
+sync:
+	/*
+	 * Found a safe stack - switch to it as if the entry didn't happen via
+	 * IST stack. The code below only copies pt_regs, the real switch happens
+	 * in assembly code.
+	 */
+	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
+
+	regs_ret = (struct pt_regs *)sp;
+	*regs_ret = *regs;
+
+	return regs_ret;
+}
+#endif
+
 struct bad_iret_stack {
 	void *error_entry_ret;
 	struct pt_regs regs;
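The one piece of arithmetic in vc_switch_off_ist() worth spelling out is how the pt_regs copy is placed on the chosen stack: the stack pointer is aligned down to 8 bytes and sizeof(struct pt_regs) is reserved below it. The following is a small standalone userspace sketch of just that placement; the struct and names are stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's ALIGN_DOWN(x, 8) for a power-of-two alignment. */
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

struct fake_pt_regs {			/* stand-in for struct pt_regs */
	uint64_t gpr[21];
};

int main(void)
{
	uint64_t stack[128];
	uintptr_t top = (uintptr_t)&stack[128];	/* top of the target stack */
	/* Reserve room for the register frame just below the (aligned) top. */
	uintptr_t sp  = ALIGN_DOWN(top, 8) - sizeof(struct fake_pt_regs);

	printf("stack top %#lx -> pt_regs copy at %#lx (%zu bytes)\n",
	       (unsigned long)top, (unsigned long)sp,
	       sizeof(struct fake_pt_regs));
	return 0;
}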
