Commit cedbb8b

Author: Marc Zyngier
arm64: KVM: VHE: Patch out kern_hyp_va
The kern_hyp_va macro is pretty meaningless with VHE, as there is only one mapping - the kernel one. In order to keep the code readable and efficient, use runtime patching to replace the 'and' instruction used to compute the VA with a 'nop'.

Reviewed-by: Christoffer Dall <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent: b81125c

File tree: 2 files changed (+33, -4 lines)

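As the commit message notes, the kernel-to-HYP conversion boils down to a single 'and' that VHE turns into a 'nop'. Below is a minimal user-space sketch of that behaviour (not kernel code, and not part of this commit); VA_BITS = 48, the sample address, and the has_vhe flag standing in for the boot-time-patched alternative are assumptions for illustration only.

/*
 * Sketch only: on ARMv8.0 a kernel VA is masked down into the HYP VA
 * range; with VHE the masking 'and' is patched to a 'nop' at boot, so
 * the address passes through unchanged.  The kernel does this with the
 * alternatives framework, not a runtime branch.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VA_BITS			48	/* assumption for this sketch */
#define HYP_PAGE_OFFSET_MASK	((UINT64_C(1) << VA_BITS) - 1)

static uint64_t kern_hyp_va(uint64_t va, bool has_vhe)
{
	/* has_vhe stands in for the ARM64_HAS_VIRT_HOST_EXTN capability. */
	return has_vhe ? va : (va & HYP_PAGE_OFFSET_MASK);
}

int main(void)
{
	uint64_t kva = UINT64_C(0xffff800012345678);	/* hypothetical kernel VA */

	printf("ARMv8.0 HYP VA: 0x%016" PRIx64 "\n", kern_hyp_va(kva, false));
	printf("VHE     HYP VA: 0x%016" PRIx64 "\n", kern_hyp_va(kva, true));
	return 0;
}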

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 11 additions & 1 deletion
@@ -23,13 +23,16 @@
 #include <asm/cpufeature.h>
 
 /*
- * As we only have the TTBR0_EL2 register, we cannot express
+ * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
  * "negative" addresses. This makes it impossible to directly share
  * mappings with the kernel.
  *
  * Instead, give the HYP mode its own VA region at a fixed offset from
  * the kernel by just masking the top bits (which are all ones for a
  * kernel address).
+ *
+ * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
+ * macros (the entire kernel runs at EL2).
  */
 #define HYP_PAGE_OFFSET_SHIFT	VA_BITS
 #define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
@@ -56,12 +59,19 @@
 
 #ifdef __ASSEMBLY__
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
 /*
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  */
 .macro kern_hyp_va	reg
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_else
+	nop
+alternative_endif
 .endm
 
 #else

arch/arm64/kvm/hyp/hyp.h

Lines changed: 22 additions & 3 deletions
@@ -25,9 +25,28 @@
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
-#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
-						      + PAGE_OFFSET)
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+	asm volatile(ALTERNATIVE("and %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
+	return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+
+static inline unsigned long __hyp_kern_va(unsigned long v)
+{
+	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
+	asm volatile(ALTERNATIVE("add %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "r" (offset));
+	return v;
+}
+
+#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
 
 /**
  * hyp_alternate_select - Generates patchable code sequences that are
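As a worked illustration of the offset arithmetic in __hyp_kern_va() above, here is a plain user-space round-trip sketch. VA_BITS = 48 and the PAGE_OFFSET/HYP_PAGE_OFFSET definitions below are assumptions mirroring the arm64 layout this header relies on; they are not part of this diff.

/*
 * Sketch only: masking a linear-map kernel VA gives the HYP VA, and
 * adding back (PAGE_OFFSET - HYP_PAGE_OFFSET) recovers the kernel VA,
 * which is what __hyp_kern_va()'s 'add' does on ARMv8.0.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VA_BITS			48	/* assumption for this sketch */
#define HYP_PAGE_OFFSET_MASK	((UINT64_C(1) << VA_BITS) - 1)
#define PAGE_OFFSET		(UINT64_MAX << (VA_BITS - 1))
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)

static uint64_t kern_hyp_va(uint64_t v)
{
	return v & HYP_PAGE_OFFSET_MASK;	/* the ARMv8.0 'and' */
}

static uint64_t hyp_kern_va(uint64_t v)
{
	uint64_t offset = PAGE_OFFSET - HYP_PAGE_OFFSET;

	return v + offset;			/* the ARMv8.0 'add' */
}

int main(void)
{
	uint64_t kva = PAGE_OFFSET + UINT64_C(0x12345678);	/* hypothetical linear-map VA */
	uint64_t hva = kern_hyp_va(kva);

	printf("kernel VA 0x%016" PRIx64 " -> HYP VA 0x%016" PRIx64 "\n", kva, hva);
	assert(hyp_kern_va(hva) == kva);	/* the two conversions round-trip */
	return 0;
}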
