Skip to content

Commit f122dfe

Browse files
sean-jc authored and bonzini committed
KVM: x86: Use __try_cmpxchg_user() to update guest PTE A/D bits
Use the recently introduced __try_cmpxchg_user() to update guest PTE A/D bits instead of mapping the PTE into kernel address space. The VM_PFNMAP path is broken as it assumes that vm_pgoff is the base pfn of the mapped VMA range, which is conceptually wrong as vm_pgoff is the offset relative to the file and has nothing to do with the pfn. The horrific hack worked for the original use case (backing guest memory with /dev/mem), but leads to accessing "random" pfns for pretty much any other VM_PFNMAP case. Fixes: bd53cb3 ("X86/KVM: Handle PFNs outside of kernel reach when touching GPTEs") Debugged-by: Tadeusz Struk <[email protected]> Tested-by: Tadeusz Struk <[email protected]> Reported-by: [email protected] Cc: [email protected] Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 989b5db commit f122dfe

File tree

1 file changed

+1
-37
lines changed

1 file changed

+1
-37
lines changed

arch/x86/kvm/mmu/paging_tmpl.h

Lines changed: 1 addition & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -144,42 +144,6 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
144144
FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
145145
}
146146

147-
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
148-
pt_element_t __user *ptep_user, unsigned index,
149-
pt_element_t orig_pte, pt_element_t new_pte)
150-
{
151-
signed char r;
152-
153-
if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
154-
return -EFAULT;
155-
156-
#ifdef CMPXCHG
157-
asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
158-
"setnz %b[r]\n"
159-
"2:"
160-
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
161-
: [ptr] "+m" (*ptep_user),
162-
[old] "+a" (orig_pte),
163-
[r] "=q" (r)
164-
: [new] "r" (new_pte)
165-
: "memory");
166-
#else
167-
asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
168-
"setnz %b[r]\n"
169-
"2:"
170-
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
171-
: [ptr] "+m" (*ptep_user),
172-
[old] "+A" (orig_pte),
173-
[r] "=q" (r)
174-
: [new_lo] "b" ((u32)new_pte),
175-
[new_hi] "c" ((u32)(new_pte >> 32))
176-
: "memory");
177-
#endif
178-
179-
user_access_end();
180-
return r;
181-
}
182-
183147
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
184148
struct kvm_mmu_page *sp, u64 *spte,
185149
u64 gpte)
@@ -278,7 +242,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
278242
if (unlikely(!walker->pte_writable[level - 1]))
279243
continue;
280244

281-
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
245+
ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
282246
if (ret)
283247
return ret;
284248

0 commit comments

Comments
 (0)