@@ -3780,6 +3780,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
37803780
37813781 mutex_lock (& kvm -> slots_lock );
37823782
3783+ /*
3784+ * Flush potentially hardware-cached dirty pages to dirty_bitmap.
3785+ */
3786+ if (kvm_x86_ops -> flush_log_dirty )
3787+ kvm_x86_ops -> flush_log_dirty (kvm );
3788+
37833789 r = kvm_get_dirty_log_protect (kvm , log , & is_dirty );
37843790
37853791 /*
@@ -7533,6 +7539,56 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
75337539 return 0 ;
75347540}
75357541
7542+ static void kvm_mmu_slot_apply_flags (struct kvm * kvm ,
7543+ struct kvm_memory_slot * new )
7544+ {
7545+ /* Still write protect RO slot */
7546+ if (new -> flags & KVM_MEM_READONLY ) {
7547+ kvm_mmu_slot_remove_write_access (kvm , new );
7548+ return ;
7549+ }
7550+
7551+ /*
7552+ * Call kvm_x86_ops dirty logging hooks when they are valid.
7553+ *
7554+ * kvm_x86_ops->slot_disable_log_dirty is called when:
7555+ *
7556+ * - KVM_MR_CREATE with dirty logging is disabled
7557+ * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
7558+ *
7559+ * The reason is, in case of PML, we need to set D-bit for any slots
7560+ * with dirty logging disabled in order to eliminate unnecessary GPA
7561+ * logging in PML buffer (and potential PML buffer full VMEXT). This
7562+ * guarantees leaving PML enabled during guest's lifetime won't have
7563+ * any additonal overhead from PML when guest is running with dirty
7564+ * logging disabled for memory slots.
7565+ *
7566+ * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
7567+ * to dirty logging mode.
7568+ *
7569+ * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
7570+ *
7571+ * In case of write protect:
7572+ *
7573+ * Write protect all pages for dirty logging.
7574+ *
7575+ * All the sptes including the large sptes which point to this
7576+ * slot are set to readonly. We can not create any new large
7577+ * spte on this slot until the end of the logging.
7578+ *
7579+ * See the comments in fast_page_fault().
7580+ */
7581+ if (new -> flags & KVM_MEM_LOG_DIRTY_PAGES ) {
7582+ if (kvm_x86_ops -> slot_enable_log_dirty )
7583+ kvm_x86_ops -> slot_enable_log_dirty (kvm , new );
7584+ else
7585+ kvm_mmu_slot_remove_write_access (kvm , new );
7586+ } else {
7587+ if (kvm_x86_ops -> slot_disable_log_dirty )
7588+ kvm_x86_ops -> slot_disable_log_dirty (kvm , new );
7589+ }
7590+ }
7591+
75367592void kvm_arch_commit_memory_region (struct kvm * kvm ,
75377593 struct kvm_userspace_memory_region * mem ,
75387594 const struct kvm_memory_slot * old ,
@@ -7562,16 +7618,15 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
75627618 new = id_to_memslot (kvm -> memslots , mem -> slot );
75637619
75647620 /*
7565- * Write protect all pages for dirty logging.
7621+ * Set up write protection and/or dirty logging for the new slot.
75667622 *
7567- * All the sptes including the large sptes which point to this
7568- * slot are set to readonly. We can not create any new large
7569- * spte on this slot until the end of the logging.
7570- *
7571- * See the comments in fast_page_fault().
7623+ * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
7624+ * been zapped so no dirty logging work is needed for old slot. For
7625+ * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
7626+ * new and it's also covered when dealing with the new slot.
75727627 */
7573- if (( change != KVM_MR_DELETE ) && ( new -> flags & KVM_MEM_LOG_DIRTY_PAGES ) )
7574- kvm_mmu_slot_remove_write_access (kvm , new );
7628+ if (change != KVM_MR_DELETE )
7629+ kvm_mmu_slot_apply_flags (kvm , new );
75757630}
75767631
75777632void kvm_arch_flush_shadow_all (struct kvm * kvm )
0 commit comments