
Commit 88178fd

Kai Huang authored and bonzini committed
KVM: x86: Add new dirty logging kvm_x86_ops for PML
This patch adds new kvm_x86_ops dirty logging hooks to enable/disable dirty logging for a particular memory slot, and to flush potentially logged dirty GPAs before reporting slot->dirty_bitmap to userspace. KVM x86 common code calls these hooks when they are available, so the PML logic can be kept VMX-specific. SVM is not affected, as these hooks remain NULL there.

Signed-off-by: Kai Huang <[email protected]>
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 1c91cad commit 88178fd

3 files changed: 93 additions, 9 deletions


arch/x86/include/asm/kvm_host.h

Lines changed: 25 additions & 0 deletions

@@ -802,6 +802,31 @@ struct kvm_x86_ops {
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 
 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
+
+	/*
+	 * Arch-specific dirty logging hooks. These hooks are only supposed to
+	 * be valid if the specific arch has hardware-accelerated dirty logging
+	 * mechanism. Currently only for PML on VMX.
+	 *
+	 * - slot_enable_log_dirty:
+	 *	called when enabling log dirty mode for the slot.
+	 * - slot_disable_log_dirty:
+	 *	called when disabling log dirty mode for the slot.
+	 *	also called when slot is created with log dirty disabled.
+	 * - flush_log_dirty:
+	 *	called before reporting dirty_bitmap to userspace.
+	 * - enable_log_dirty_pt_masked:
+	 *	called when reenabling log dirty for the GFNs in the mask after
+	 *	corresponding bits are cleared in slot->dirty_bitmap.
+	 */
+	void (*slot_enable_log_dirty)(struct kvm *kvm,
+				      struct kvm_memory_slot *slot);
+	void (*slot_disable_log_dirty)(struct kvm *kvm,
+				       struct kvm_memory_slot *slot);
+	void (*flush_log_dirty)(struct kvm *kvm);
+	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+					   struct kvm_memory_slot *slot,
+					   gfn_t offset, unsigned long mask);
 };
 
 struct kvm_arch_async_pf {
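These hooks are only declared in this patch; no backend implements them yet. As a rough, hypothetical sketch of how a PML-capable VMX backend might later wire them up (the vmx_* functions below are illustrative stubs, not code from this commit):

static void vmx_slot_enable_log_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	/* e.g. clear D-bits on the slot's EPT entries so writes get logged */
}

static void vmx_slot_disable_log_dirty(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
{
	/* e.g. set D-bits so the slot stops generating PML entries */
}

static void vmx_flush_log_dirty(struct kvm *kvm)
{
	/* e.g. drain each vcpu's PML buffer into slot->dirty_bitmap */
}

static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask)
{
	/* e.g. clear D-bits only for the GFNs selected by offset/mask */
}

static struct kvm_x86_ops vmx_x86_ops = {
	/* ...existing callbacks elided... */
	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
	.flush_log_dirty = vmx_flush_log_dirty,
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
};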

arch/x86/kvm/mmu.c

Lines changed: 5 additions & 1 deletion

@@ -1335,7 +1335,11 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				gfn_t gfn_offset, unsigned long mask)
 {
-	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	if (kvm_x86_ops->enable_log_dirty_pt_masked)
+		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+				mask);
+	else
+		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
 static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
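For context on where gfn_offset and mask come from: the generic dirty-log path hands this hook one word of the dirty bitmap at a time, after atomically snapshotting and clearing it. A simplified sketch of that caller, loosely paraphrasing kvm_get_dirty_log_protect(), with locking, bounds handling and copy-out omitted:

/* Simplified sketch only; dirty_bitmap/dirty_bitmap_buffer span n bytes. */
for (i = 0; i < n / sizeof(long); i++) {
	/* grab and clear one word of dirty bits */
	unsigned long mask = xchg(&dirty_bitmap[i], 0);

	if (!mask)
		continue;

	dirty_bitmap_buffer[i] = mask;	/* snapshot handed back to userspace */

	/*
	 * Re-arm logging for exactly these GFNs: write-protect them, or,
	 * with PML, clear their D-bits so future writes are logged again.
	 */
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
						i * BITS_PER_LONG, mask);
}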

arch/x86/kvm/x86.c

Lines changed: 63 additions & 8 deletions

@@ -3780,6 +3780,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	mutex_lock(&kvm->slots_lock);
 
+	/*
+	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+	 */
+	if (kvm_x86_ops->flush_log_dirty)
+		kvm_x86_ops->flush_log_dirty(kvm);
+
 	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 
 	/*
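From userspace, the flush hook is exercised through the regular dirty-log ioctl. A minimal sketch (assuming vm_fd is an open KVM VM file descriptor and the slot spans npages guest pages):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the dirty bitmap for one memslot; the caller frees the returned buffer. */
static unsigned long *get_dirty_bitmap(int vm_fd, uint32_t slot, size_t npages)
{
	size_t bytes = ((npages + 63) / 64) * 8;   /* one bit per page, 64-bit aligned */
	unsigned long *bitmap = calloc(1, bytes);
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	if (!bitmap)
		return NULL;

	/* With PML, flush_log_dirty runs inside this ioctl before the bitmap is copied out. */
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}

	return bitmap;
}
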
@@ -7533,6 +7539,56 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
+				     struct kvm_memory_slot *new)
+{
+	/* Still write protect RO slot */
+	if (new->flags & KVM_MEM_READONLY) {
+		kvm_mmu_slot_remove_write_access(kvm, new);
+		return;
+	}
+
+	/*
+	 * Call kvm_x86_ops dirty logging hooks when they are valid.
+	 *
+	 * kvm_x86_ops->slot_disable_log_dirty is called when:
+	 *
+	 *  - KVM_MR_CREATE with dirty logging disabled
+	 *  - KVM_MR_FLAGS_ONLY with dirty logging disabled in the new flags
+	 *
+	 * The reason is, in case of PML, we need to set D-bit for any slots
+	 * with dirty logging disabled in order to eliminate unnecessary GPA
+	 * logging in the PML buffer (and potential PML buffer full VMEXIT).
+	 * This guarantees that leaving PML enabled during the guest's lifetime
+	 * won't have any additional overhead from PML when the guest is
+	 * running with dirty logging disabled for memory slots.
+	 *
+	 * kvm_x86_ops->slot_enable_log_dirty is called when switching the new
+	 * slot to dirty logging mode.
+	 *
+	 * If the kvm_x86_ops dirty logging hooks are invalid, use write protection.
+	 *
+	 * In case of write protection:
+	 *
+	 * Write protect all pages for dirty logging.
+	 *
+	 * All the sptes, including the large sptes which point to this
+	 * slot, are set to readonly. We can not create any new large
+	 * spte on this slot until the end of the logging.
+	 *
+	 * See the comments in fast_page_fault().
+	 */
+	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		if (kvm_x86_ops->slot_enable_log_dirty)
+			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
+		else
+			kvm_mmu_slot_remove_write_access(kvm, new);
+	} else {
+		if (kvm_x86_ops->slot_disable_log_dirty)
+			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+	}
+}
+
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				const struct kvm_memory_slot *old,
@@ -7562,16 +7618,15 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	new = id_to_memslot(kvm->memslots, mem->slot);
 
 	/*
-	 * Write protect all pages for dirty logging.
+	 * Set up write protection and/or dirty logging for the new slot.
 	 *
-	 * All the sptes including the large sptes which point to this
-	 * slot are set to readonly. We can not create any new large
-	 * spte on this slot until the end of the logging.
-	 *
-	 * See the comments in fast_page_fault().
+	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
+	 * have been zapped, so no dirty logging work is needed for the old
+	 * slot. For KVM_MR_FLAGS_ONLY, the old slot is essentially the same
+	 * one as the new, and it is also covered when dealing with the new slot.
 	 */
-	if ((change != KVM_MR_DELETE) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES))
-		kvm_mmu_slot_remove_write_access(kvm, new);
+	if (change != KVM_MR_DELETE)
+		kvm_mmu_slot_apply_flags(kvm, new);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
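A slot only reaches kvm_mmu_slot_apply_flags() when userspace creates it or changes its flags. A minimal userspace sketch of toggling dirty logging on an existing slot (a KVM_MR_FLAGS_ONLY change; the slot number, guest physical range, and backing host memory are assumed to be set up elsewhere):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable or disable dirty logging for an already-registered memslot. */
static int set_dirty_logging(int vm_fd, uint32_t slot, uint64_t gpa,
			     uint64_t size, void *hva, int enable)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uint64_t)(uintptr_t)hva,
	};

	/* Ends up in slot_enable_log_dirty / slot_disable_log_dirty (or the
	 * write-protection fallback) via kvm_mmu_slot_apply_flags(). */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

The region parameters must match the existing slot exactly; only the flags change, which KVM treats as a KVM_MR_FLAGS_ONLY update.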
