Skip to content

Commit 05b4ebd

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:

 "RISC-V:

   - Fix compilation without RISCV_ISA_ZICBOM

   - Fix kvm_riscv_vcpu_timer_pending() for Sstc

  ARM:

   - Fix a bug preventing restoring an ITS containing mappings for very
     large and very sparse device topology

   - Work around a relocation handling error when compiling the nVHE
     object with profile optimisation

   - Fix for stage-2 invalidation holding the VM MMU lock for too long
     by limiting the walk to the largest block mapping size

   - Enable stack protection and branch profiling for VHE

   - Two selftest fixes

  x86:

   - add compat implementation for KVM_X86_SET_MSR_FILTER ioctl

  selftests:

   - synchronize includes between include/uapi and tools/include/uapi"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  tools: include: sync include/api/linux/kvm.h
  KVM: x86: Add compat handler for KVM_X86_SET_MSR_FILTER
  KVM: x86: Copy filter arg outside kvm_vm_ioctl_set_msr_filter()
  kvm: Add support for arch compat vm ioctls
  RISC-V: KVM: Fix kvm_riscv_vcpu_timer_pending() for Sstc
  RISC-V: Fix compilation without RISCV_ISA_ZICBOM
  KVM: arm64: vgic: Fix exit condition in scan_its_table()
  KVM: arm64: nvhe: Fix build with profile optimization
  KVM: selftests: Fix number of pages for memory slot in memslot_modification_stress_test
  KVM: arm64: selftests: Fix multiple versions of GIC creation
  KVM: arm64: Enable stack protection and branch profiling for VHE
  KVM: arm64: Limit stage2_apply_range() batch size to largest block
  KVM: arm64: Work out supported block level at compile time
2 parents ca4582c + 9aec606 commit 05b4ebd

File tree

18 files changed

+180
-99
lines changed

18 files changed

+180
-99
lines changed

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,18 @@
1313

1414
#define KVM_PGTABLE_MAX_LEVELS 4U
1515

16+
/*
17+
* The largest supported block sizes for KVM (no 52-bit PA support):
18+
* - 4K (level 1): 1GB
19+
* - 16K (level 2): 32MB
20+
* - 64K (level 2): 512MB
21+
*/
22+
#ifdef CONFIG_ARM64_4K_PAGES
23+
#define KVM_PGTABLE_MIN_BLOCK_LEVEL 1U
24+
#else
25+
#define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U
26+
#endif
27+
1628
static inline u64 kvm_get_parange(u64 mmfr0)
1729
{
1830
u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
@@ -58,11 +70,7 @@ static inline u64 kvm_granule_size(u32 level)
5870

5971
static inline bool kvm_level_supports_block_mapping(u32 level)
6072
{
61-
/*
62-
* Reject invalid block mappings and don't bother with 4TB mappings for
63-
* 52-bit PAs.
64-
*/
65-
return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
73+
return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
6674
}
6775

6876
/**

arch/arm64/include/asm/stage2_pgtable.h

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,6 @@
1010

1111
#include <linux/pgtable.h>
1212

13-
/*
14-
* PGDIR_SHIFT determines the size a top-level page table entry can map
15-
* and depends on the number of levels in the page table. Compute the
16-
* PGDIR_SHIFT for a given number of levels.
17-
*/
18-
#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
19-
2013
/*
2114
* The hardware supports concatenation of up to 16 tables at stage2 entry
2215
* level and we use the feature whenever possible, which means we resolve 4
@@ -30,24 +23,11 @@
3023
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
3124
#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
3225

33-
/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
34-
#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
35-
#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm))
36-
#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1)
37-
3826
/*
3927
* kvm_mmmu_cache_min_pages() is the number of pages required to install
4028
* a stage-2 translation. We pre-allocate the entry level page table at
4129
* the VM creation.
4230
*/
4331
#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
4432

45-
static inline phys_addr_t
46-
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
47-
{
48-
phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
49-
50-
return (boundary - 1 < end - 1) ? boundary : end;
51-
}
52-
5333
#endif /* __ARM64_S2_PGTABLE_H_ */

arch/arm64/kvm/hyp/Makefile

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,6 @@
55

66
incdir := $(srctree)/$(src)/include
77
subdir-asflags-y := -I$(incdir)
8-
subdir-ccflags-y := -I$(incdir) \
9-
-fno-stack-protector \
10-
-DDISABLE_BRANCH_PROFILING \
11-
$(DISABLE_STACKLEAK_PLUGIN)
8+
subdir-ccflags-y := -I$(incdir)
129

1310
obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o

arch/arm64/kvm/hyp/nvhe/Makefile

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
1010
# will explode instantly (Words of Marc Zyngier). So introduce a generic flag
1111
# __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
1212
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
13+
ccflags-y += -fno-stack-protector \
14+
-DDISABLE_BRANCH_PROFILING \
15+
$(DISABLE_STACKLEAK_PLUGIN)
1316

1417
hostprogs := gen-hyprel
1518
HOST_EXTRACFLAGS += -I$(objtree)/include
@@ -89,6 +92,10 @@ quiet_cmd_hypcopy = HYPCOPY $@
8992
# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
9093
# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
9194
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
95+
# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
96+
# when profile optimization is applied. gen-hyprel does not support SHT_REL and
97+
# causes a build failure. Remove profile optimization flags.
98+
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
9299

93100
# KVM nVHE code is run at a different exception code with a different map, so
94101
# compiler instrumentation that inserts callbacks or checks into the code may

arch/arm64/kvm/mmu.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,13 @@ static phys_addr_t hyp_idmap_vector;
3131

3232
static unsigned long io_map_base;
3333

34+
static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
35+
{
36+
phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
37+
phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
38+
39+
return (boundary - 1 < end - 1) ? boundary : end;
40+
}
3441

3542
/*
3643
* Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
@@ -52,7 +59,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
5259
if (!pgt)
5360
return -EINVAL;
5461

55-
next = stage2_pgd_addr_end(kvm, addr, end);
62+
next = stage2_range_addr_end(addr, end);
5663
ret = fn(pgt, addr, next - addr);
5764
if (ret)
5865
break;

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2149,7 +2149,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
21492149

21502150
memset(entry, 0, esz);
21512151

2152-
while (len > 0) {
2152+
while (true) {
21532153
int next_offset;
21542154
size_t byte_offset;
21552155

@@ -2162,6 +2162,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
21622162
return next_offset;
21632163

21642164
byte_offset = next_offset * esz;
2165+
if (byte_offset >= len)
2166+
break;
2167+
21652168
id += next_offset;
21662169
gpa += byte_offset;
21672170
len -= byte_offset;

arch/riscv/include/asm/cacheflush.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -42,16 +42,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
4242

4343
#endif /* CONFIG_SMP */
4444

45-
/*
46-
* The T-Head CMO errata internally probe the CBOM block size, but otherwise
47-
* don't depend on Zicbom.
48-
*/
4945
extern unsigned int riscv_cbom_block_size;
50-
#ifdef CONFIG_RISCV_ISA_ZICBOM
5146
void riscv_init_cbom_blocksize(void);
52-
#else
53-
static inline void riscv_init_cbom_blocksize(void) { }
54-
#endif
5547

5648
#ifdef CONFIG_RISCV_DMA_NONCOHERENT
5749
void riscv_noncoherent_supported(void);

arch/riscv/include/asm/kvm_vcpu_timer.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
4545
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
4646
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
4747
void kvm_riscv_guest_timer_init(struct kvm *kvm);
48+
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
4849
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
4950
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
5051

arch/riscv/kvm/vcpu.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
708708
clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
709709
}
710710
}
711+
712+
/* Sync-up timer CSRs */
713+
kvm_riscv_vcpu_timer_sync(vcpu);
711714
}
712715

713716
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)

arch/riscv/kvm/vcpu_timer.c

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -320,20 +320,33 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
320320
kvm_riscv_vcpu_timer_unblocking(vcpu);
321321
}
322322

323-
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
323+
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
324324
{
325325
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
326326

327327
if (!t->sstc_enabled)
328328
return;
329329

330-
t = &vcpu->arch.timer;
331330
#if defined(CONFIG_32BIT)
332331
t->next_cycles = csr_read(CSR_VSTIMECMP);
333332
t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
334333
#else
335334
t->next_cycles = csr_read(CSR_VSTIMECMP);
336335
#endif
336+
}
337+
338+
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
339+
{
340+
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
341+
342+
if (!t->sstc_enabled)
343+
return;
344+
345+
/*
346+
* The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
347+
* upon every VM exit so no need to save here.
348+
*/
349+
337350
/* timer should be enabled for the remaining operations */
338351
if (unlikely(!t->init_done))
339352
return;

0 commit comments

Comments (0)