Skip to content

Commit 24625f7

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini: "While last week's pull request contained miscellaneous fixes for x86, this one covers other architectures, selftests changes, and a bigger series for APIC virtualization bugs that were discovered during 5.20 development. The idea is to base 5.20 development for KVM on top of this tag. ARM64: - Properly reset the SVE/SME flags on vcpu load - Fix a vgic-v2 regression regarding accessing the pending state of a HW interrupt from userspace (and make the code common with vgic-v3) - Fix access to the idreg range for protected guests - Ignore 'kvm-arm.mode=protected' when using VHE - Return an error from kvm_arch_init_vm() on allocation failure - A bunch of small cleanups (comments, annotations, indentation) RISC-V: - Typo fix in arch/riscv/kvm/vmid.c - Remove broken reference pattern from MAINTAINERS entry x86-64: - Fix error in page tables with MKTME enabled - Dirty page tracking performance test extended to running a nested guest - Disable APICv/AVIC in cases that it cannot implement correctly" [ This merge also fixes a misplaced end parenthesis bug introduced in commit 3743c2f ("KVM: x86: inhibit APICv/AVIC on changes to APIC ID or APIC base") pointed out by Sean Christopherson ] Link: https://lore.kernel.org/all/[email protected]/ * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (34 commits) KVM: selftests: Restrict test region to 48-bit physical addresses when using nested KVM: selftests: Add option to run dirty_log_perf_test vCPUs in L2 KVM: selftests: Clean up LIBKVM files in Makefile KVM: selftests: Link selftests directly with lib object files KVM: selftests: Drop unnecessary rule for STATIC_LIBS KVM: selftests: Add a helper to check EPT/VPID capabilities KVM: selftests: Move VMX_EPT_VPID_CAP_AD_BITS to vmx.h KVM: selftests: Refactor nested_map() to specify target level KVM: selftests: Drop stale function parameter comment for nested_map() KVM: selftests: Add option to create 2M and 1G EPT mappings
KVM: selftests: Replace x86_page_size with PG_LEVEL_XX KVM: x86: SVM: fix nested PAUSE filtering when L0 intercepts PAUSE KVM: x86: SVM: drop preempt-safe wrappers for avic_vcpu_load/put KVM: x86: disable preemption around the call to kvm_arch_vcpu_{un|}blocking KVM: x86: disable preemption while updating apicv inhibition KVM: x86: SVM: fix avic_kick_target_vcpus_fast KVM: x86: SVM: remove avic's broken code that updated APIC ID KVM: x86: inhibit APICv/AVIC on changes to APIC ID or APIC base KVM: x86: document AVIC/APICv inhibit reasons KVM: x86/mmu: Set memory encryption "value", not "mask", in shadow PDPTRs ...
2 parents 8e8afaf + e0f3f46 commit 24625f7

File tree

37 files changed

+640
-312
lines changed

37 files changed

+640
-312
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2469,7 +2469,6 @@
24692469

24702470
protected: nVHE-based mode with support for guests whose
24712471
state is kept private from the host.
2472-
Not valid if the kernel is running in EL2.
24732472

24742473
Defaults to VHE/nVHE based on hardware support. Setting
24752474
mode to "protected" will disable kexec and hibernation

MAINTAINERS

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10872,7 +10872,6 @@ F: arch/riscv/include/asm/kvm*
1087210872
F: arch/riscv/include/uapi/asm/kvm*
1087310873
F: arch/riscv/kvm/
1087410874
F: tools/testing/selftests/kvm/*/riscv/
10875-
F: tools/testing/selftests/kvm/riscv/
1087610875

1087710876
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
1087810877
M: Christian Borntraeger <[email protected]>

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -362,11 +362,6 @@ struct kvm_vcpu_arch {
362362
struct arch_timer_cpu timer_cpu;
363363
struct kvm_pmu pmu;
364364

365-
/*
366-
* Anything that is not used directly from assembly code goes
367-
* here.
368-
*/
369-
370365
/*
371366
* Guest registers we preserve during guest debugging.
372367
*

arch/arm64/include/asm/virt.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,9 @@ static __always_inline bool has_vhe(void)
113113
/*
114114
* Code only run in VHE/NVHE hyp context can assume VHE is present or
115115
* absent. Otherwise fall back to caps.
116+
* This allows the compiler to discard VHE-specific code from the
117+
* nVHE object, reducing the number of external symbol references
118+
* needed to link.
116119
*/
117120
if (is_vhe_hyp_code())
118121
return true;

arch/arm64/kernel/cpufeature.c

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1974,15 +1974,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
19741974
#ifdef CONFIG_KVM
19751975
static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
19761976
{
1977-
if (kvm_get_mode() != KVM_MODE_PROTECTED)
1978-
return false;
1979-
1980-
if (is_kernel_in_hyp_mode()) {
1981-
pr_warn("Protected KVM not available with VHE\n");
1982-
return false;
1983-
}
1984-
1985-
return true;
1977+
return kvm_get_mode() == KVM_MODE_PROTECTED;
19861978
}
19871979
#endif /* CONFIG_KVM */
19881980

arch/arm64/kvm/arch_timer.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1230,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid)
12301230
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
12311231
struct arch_timer_context *timer;
12321232

1233+
if (WARN(!vcpu, "No vcpu context!\n"))
1234+
return false;
1235+
12331236
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
12341237
timer = vcpu_vtimer(vcpu);
12351238
else if (vintid == vcpu_ptimer(vcpu)->irq.irq)

arch/arm64/kvm/arm.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -150,8 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
150150
if (ret)
151151
goto out_free_stage2_pgd;
152152

153-
if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL))
153+
if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
154+
ret = -ENOMEM;
154155
goto out_free_stage2_pgd;
156+
}
155157
cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
156158

157159
kvm_vgic_early_init(kvm);
@@ -2271,7 +2273,11 @@ static int __init early_kvm_mode_cfg(char *arg)
22712273
return -EINVAL;
22722274

22732275
if (strcmp(arg, "protected") == 0) {
2274-
kvm_mode = KVM_MODE_PROTECTED;
2276+
if (!is_kernel_in_hyp_mode())
2277+
kvm_mode = KVM_MODE_PROTECTED;
2278+
else
2279+
pr_warn_once("Protected KVM not available with VHE\n");
2280+
22752281
return 0;
22762282
}
22772283

arch/arm64/kvm/fpsimd.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
8080
vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
8181
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
8282

83+
vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED;
8384
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
8485
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
8586

@@ -93,6 +94,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
9394
* operations. Do this for ZA as well for now for simplicity.
9495
*/
9596
if (system_supports_sme()) {
97+
vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED;
9698
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
9799
vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
98100

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -314,15 +314,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
314314
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
315315
enum kvm_pgtable_prot prot)
316316
{
317-
hyp_assert_lock_held(&host_kvm.lock);
318-
319317
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
320318
}
321319

322320
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
323321
{
324-
hyp_assert_lock_held(&host_kvm.lock);
325-
326322
return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
327323
addr, size, &host_s2_pool, owner_id);
328324
}

arch/arm64/kvm/hyp/nvhe/sys_regs.c

Lines changed: 34 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -243,15 +243,9 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
243243
case SYS_ID_AA64MMFR2_EL1:
244244
return get_pvm_id_aa64mmfr2(vcpu);
245245
default:
246-
/*
247-
* Should never happen because all cases are covered in
248-
* pvm_sys_reg_descs[].
249-
*/
250-
WARN_ON(1);
251-
break;
246+
/* Unhandled ID register, RAZ */
247+
return 0;
252248
}
253-
254-
return 0;
255249
}
256250

257251
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
@@ -332,6 +326,16 @@ static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
332326
/* Mark the specified system register as an AArch64 feature id register. */
333327
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }
334328

329+
/*
330+
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
331+
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
332+
* (1 <= crm < 8, 0 <= Op2 < 8).
333+
*/
334+
#define ID_UNALLOCATED(crm, op2) { \
335+
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
336+
.access = pvm_access_id_aarch64, \
337+
}
338+
335339
/* Mark the specified system register as Read-As-Zero/Write-Ignored */
336340
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }
337341

@@ -375,24 +379,46 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
375379
AARCH32(SYS_MVFR0_EL1),
376380
AARCH32(SYS_MVFR1_EL1),
377381
AARCH32(SYS_MVFR2_EL1),
382+
ID_UNALLOCATED(3,3),
378383
AARCH32(SYS_ID_PFR2_EL1),
379384
AARCH32(SYS_ID_DFR1_EL1),
380385
AARCH32(SYS_ID_MMFR5_EL1),
386+
ID_UNALLOCATED(3,7),
381387

382388
/* AArch64 ID registers */
383389
/* CRm=4 */
384390
AARCH64(SYS_ID_AA64PFR0_EL1),
385391
AARCH64(SYS_ID_AA64PFR1_EL1),
392+
ID_UNALLOCATED(4,2),
393+
ID_UNALLOCATED(4,3),
386394
AARCH64(SYS_ID_AA64ZFR0_EL1),
395+
ID_UNALLOCATED(4,5),
396+
ID_UNALLOCATED(4,6),
397+
ID_UNALLOCATED(4,7),
387398
AARCH64(SYS_ID_AA64DFR0_EL1),
388399
AARCH64(SYS_ID_AA64DFR1_EL1),
400+
ID_UNALLOCATED(5,2),
401+
ID_UNALLOCATED(5,3),
389402
AARCH64(SYS_ID_AA64AFR0_EL1),
390403
AARCH64(SYS_ID_AA64AFR1_EL1),
404+
ID_UNALLOCATED(5,6),
405+
ID_UNALLOCATED(5,7),
391406
AARCH64(SYS_ID_AA64ISAR0_EL1),
392407
AARCH64(SYS_ID_AA64ISAR1_EL1),
408+
AARCH64(SYS_ID_AA64ISAR2_EL1),
409+
ID_UNALLOCATED(6,3),
410+
ID_UNALLOCATED(6,4),
411+
ID_UNALLOCATED(6,5),
412+
ID_UNALLOCATED(6,6),
413+
ID_UNALLOCATED(6,7),
393414
AARCH64(SYS_ID_AA64MMFR0_EL1),
394415
AARCH64(SYS_ID_AA64MMFR1_EL1),
395416
AARCH64(SYS_ID_AA64MMFR2_EL1),
417+
ID_UNALLOCATED(7,3),
418+
ID_UNALLOCATED(7,4),
419+
ID_UNALLOCATED(7,5),
420+
ID_UNALLOCATED(7,6),
421+
ID_UNALLOCATED(7,7),
396422

397423
/* Scalable Vector Registers are restricted. */
398424

0 commit comments

Comments
 (0)