
Commit 2415e46

RISC-V: KVM: Introduce range based local HFENCE functions
Various __kvm_riscv_hfence_xyz() functions implemented in kvm/tlb.S are equivalent to the corresponding HFENCE.GVMA instructions, and we don't have range based local HFENCE functions.

This patch provides a complete set of local HFENCE functions that support range based TLB invalidation, and it adds HFENCE.VVMA based functions as well. This is also a preparatory patch for the upcoming Svinval support in KVM RISC-V.

Signed-off-by: Anup Patel <[email protected]>
Reviewed-by: Atish Patra <[email protected]>
Signed-off-by: Anup Patel <[email protected]>
1 parent c7fa3c4 commit 2415e46

File tree

6 files changed: +237 -83 lines changed

arch/riscv/include/asm/kvm_host.h

Lines changed: 20 additions & 5 deletions
@@ -201,11 +201,26 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
-				      unsigned long vmid);
-void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
-void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
-void __kvm_riscv_hfence_gvma_all(void);
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+					  gpa_t gpa, gpa_t gpsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+					  unsigned long asid,
+					  unsigned long gva,
+					  unsigned long gvsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+					  unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+				     unsigned long gva, unsigned long gvsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
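
For context, here is a minimal caller sketch (an editor's illustration, not part of this patch: the helper name flush_one_gstage_page is hypothetical, and the real call sites arrive in later patches of the series). It flushes the G-stage mapping of one minimum-order (4 KiB) guest page for a given VMID using the new range based primitive:

	/* Hypothetical helper, for illustration only. */
	static void flush_one_gstage_page(unsigned long vmid, gpa_t gpa)
	{
		/*
		 * gpsz covers exactly one BIT(order) stride, so the loop in
		 * kvm_riscv_local_hfence_gvma_vmid_gpa() issues a single
		 * HFENCE.GVMA; larger ranges are walked in BIT(order) steps
		 * and fall back to a full VMID flush once they span more
		 * than PTRS_PER_PTE pages.
		 */
		kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, gpa,
				BIT(KVM_RISCV_GSTAGE_TLB_MIN_ORDER),
				KVM_RISCV_GSTAGE_TLB_MIN_ORDER);
	}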

arch/riscv/kvm/mmu.c

Lines changed: 2 additions & 2 deletions
@@ -745,7 +745,7 @@ void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
 	csr_write(CSR_HGATP, hgatp);
 
 	if (!kvm_riscv_gstage_vmid_bits())
-		__kvm_riscv_hfence_gvma_all();
+		kvm_riscv_local_hfence_gvma_all();
 }
 
 void kvm_riscv_gstage_mode_detect(void)
@@ -768,7 +768,7 @@ void kvm_riscv_gstage_mode_detect(void)
 skip_sv48x4_test:
 
 	csr_write(CSR_HGATP, 0);
-	__kvm_riscv_hfence_gvma_all();
+	kvm_riscv_local_hfence_gvma_all();
 #endif
 }

arch/riscv/kvm/tlb.S

Lines changed: 0 additions & 74 deletions
This file was deleted.

arch/riscv/kvm/tlb.c

Lines changed: 213 additions & 0 deletions
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+
+/*
+ * Instruction encoding of hfence.gvma is:
+ * HFENCE.GVMA rs1, rs2
+ * HFENCE.GVMA zero, rs2
+ * HFENCE.GVMA rs1
+ * HFENCE.GVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.GVMA
+ *
+ * Instruction encoding of HFENCE.GVMA is:
+ * 0110001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+					  gpa_t gpa, gpa_t gpsz,
+					  unsigned long order)
+{
+	gpa_t pos;
+
+	if (PTRS_PER_PTE < (gpsz >> order)) {
+		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+		return;
+	}
+
+	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GPA >> 2)
+		 * rs2 = a1 (VMID)
+		 * HFENCE.GVMA a0, a1
+		 * 0110001 01011 01010 000 00000 1110011
+		 */
+		asm volatile ("srli a0, %0, 2\n"
+			      "add a1, %1, zero\n"
+			      ".word 0x62b50073\n"
+			      :: "r" (pos), "r" (vmid)
+			      : "a0", "a1", "memory");
+	}
+}
+
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
+{
+	/*
+	 * rs1 = zero
+	 * rs2 = a0 (VMID)
+	 * HFENCE.GVMA zero, a0
+	 * 0110001 01010 00000 000 00000 1110011
+	 */
+	asm volatile ("add a0, %0, zero\n"
+		      ".word 0x62a00073\n"
+		      :: "r" (vmid) : "a0", "memory");
+}
+
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+				     unsigned long order)
+{
+	gpa_t pos;
+
+	if (PTRS_PER_PTE < (gpsz >> order)) {
+		kvm_riscv_local_hfence_gvma_all();
+		return;
+	}
+
+	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GPA >> 2)
+		 * rs2 = zero
+		 * HFENCE.GVMA a0
+		 * 0110001 00000 01010 000 00000 1110011
+		 */
+		asm volatile ("srli a0, %0, 2\n"
+			      ".word 0x62050073\n"
+			      :: "r" (pos) : "a0", "memory");
+	}
+}
+
+void kvm_riscv_local_hfence_gvma_all(void)
+{
+	/*
+	 * rs1 = zero
+	 * rs2 = zero
+	 * HFENCE.GVMA
+	 * 0110001 00000 00000 000 00000 1110011
+	 */
+	asm volatile (".word 0x62000073" ::: "memory");
+}
+
+/*
+ * Instruction encoding of hfence.vvma is:
+ * HFENCE.VVMA rs1, rs2
+ * HFENCE.VVMA zero, rs2
+ * HFENCE.VVMA rs1
+ * HFENCE.VVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.VVMA
+ *
+ * Instruction encoding of HFENCE.VVMA is:
+ * 0010001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+					  unsigned long asid,
+					  unsigned long gva,
+					  unsigned long gvsz,
+					  unsigned long order)
+{
+	unsigned long pos, hgatp;
+
+	if (PTRS_PER_PTE < (gvsz >> order)) {
+		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
+		return;
+	}
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GVA)
+		 * rs2 = a1 (ASID)
+		 * HFENCE.VVMA a0, a1
+		 * 0010001 01011 01010 000 00000 1110011
+		 */
+		asm volatile ("add a0, %0, zero\n"
+			      "add a1, %1, zero\n"
+			      ".word 0x22b50073\n"
+			      :: "r" (pos), "r" (asid)
+			      : "a0", "a1", "memory");
+	}
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+					  unsigned long asid)
+{
+	unsigned long hgatp;
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	/*
+	 * rs1 = zero
+	 * rs2 = a0 (ASID)
+	 * HFENCE.VVMA zero, a0
+	 * 0010001 01010 00000 000 00000 1110011
+	 */
+	asm volatile ("add a0, %0, zero\n"
+		      ".word 0x22a00073\n"
+		      :: "r" (asid) : "a0", "memory");
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+				     unsigned long gva, unsigned long gvsz,
+				     unsigned long order)
+{
+	unsigned long pos, hgatp;
+
+	if (PTRS_PER_PTE < (gvsz >> order)) {
+		kvm_riscv_local_hfence_vvma_all(vmid);
+		return;
+	}
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GVA)
+		 * rs2 = zero
+		 * HFENCE.VVMA a0
+		 * 0010001 00000 01010 000 00000 1110011
+		 */
+		asm volatile ("add a0, %0, zero\n"
+			      ".word 0x22050073\n"
+			      :: "r" (pos) : "a0", "memory");
+	}
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
+{
+	unsigned long hgatp;
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	/*
+	 * rs1 = zero
+	 * rs2 = zero
+	 * HFENCE.VVMA
+	 * 0010001 00000 00000 000 00000 1110011
+	 */
+	asm volatile (".word 0x22000073" ::: "memory");
+
+	csr_write(CSR_HGATP, hgatp);
+}
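
As a sanity check on the hand-assembled .word values above, the standalone sketch below (an editor's addition, not part of the patch) rebuilds them from the R-type layout given in the comments, funct7 rs2(5) rs1(5) funct3 rd(5) opcode:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack an HFENCE-style R-type instruction: funct3 and rd are zero
	 * and the opcode is SYSTEM (1110011 = 0x73). */
	static uint32_t hfence(uint32_t funct7, uint32_t rs2, uint32_t rs1)
	{
		return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | 0x73;
	}

	int main(void)
	{
		/* HFENCE.GVMA a0, a1: funct7=0b0110001, rs2=a1 (x11), rs1=a0 (x10) */
		printf("0x%08x\n", hfence(0x31, 11, 10));	/* prints 0x62b50073 */
		/* HFENCE.VVMA a0, a1: funct7=0b0010001 */
		printf("0x%08x\n", hfence(0x11, 11, 10));	/* prints 0x22b50073 */
		/* Using x0 for rs2 and/or rs1 yields the other .word forms,
		 * e.g. hfence(0x31, 0, 0) == 0x62000073 for a bare HFENCE.GVMA. */
		return 0;
	}

The raw .word encodings are presumably used because toolchains without hypervisor-extension support cannot assemble the HFENCE.GVMA/HFENCE.VVMA mnemonics directly.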

arch/riscv/kvm/vcpu.c

Lines changed: 1 addition & 1 deletion
@@ -693,7 +693,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 			kvm_riscv_gstage_update_hgatp(vcpu);
 
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-			__kvm_riscv_hfence_gvma_all();
+			kvm_riscv_local_hfence_gvma_all();
 	}
 }
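
For orientation, a hedged sketch of how such a flush is typically raised (generic KVM request API; this particular call site is an assumption, not part of this diff):

	/*
	 * Some path that invalidated guest mappings queues a TLB flush for
	 * the vCPU and kicks it out of guest mode; the handler above then
	 * services the request with the local HFENCE.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);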

arch/riscv/kvm/vmid.c

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ void kvm_riscv_gstage_vmid_detect(void)
 	csr_write(CSR_HGATP, old);
 
 	/* We polluted local TLB so flush all guest TLB */
-	__kvm_riscv_hfence_gvma_all();
+	kvm_riscv_local_hfence_gvma_all();
 
 	/* We don't use VMID bits if they are not sufficient */
 	if ((1UL << vmid_bits) < num_possible_cpus())
