mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-06 22:58:29 +00:00
KVM: MMU: mark page dirty in make_spte
This simplifies set_spte, which we want to remove, and unifies code between the shadow MMU and the TDP MMU. The warning will be added back later to make_spte as well. There is a small disadvantage in the TDP MMU; it may unnecessarily mark a page as dirty twice if two vCPUs end up mapping the same page twice. However, this is a very small cost for a case that is already rare. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
68be1306ca
commit
bcc4f2bc50
3 changed files with 4 additions and 23 deletions
|
@@ -2688,9 +2688,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);

-	if (spte & PT_WRITABLE_MASK)
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
-
 	if (*sptep == spte)
 		ret |= SET_SPTE_SPURIOUS;
 	else if (mmu_spte_update(sptep, spte))
|
|
@@ -179,6 +179,9 @@ out:
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
 		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

+	if (spte & PT_WRITABLE_MASK)
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
 	*new_spte = spte;
 	return ret;
 }
|
|
@@ -542,26 +542,7 @@ static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
 					       struct tdp_iter *iter,
 					       u64 new_spte)
 {
-	struct kvm *kvm = vcpu->kvm;
-
-	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
-		return false;
-
-	/*
-	 * Use kvm_vcpu_gfn_to_memslot() instead of going through
-	 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
-	 */
-	if (is_writable_pte(new_spte)) {
-		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);
-
-		if (slot && kvm_slot_dirty_track_enabled(slot)) {
-			/* Enforced by kvm_mmu_hugepage_adjust. */
-			WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
-			mark_page_dirty_in_slot(kvm, slot, iter->gfn);
-		}
-	}
-
-	return true;
+	return tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, iter, new_spte);
 }

 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
|
Loading…
Add table
Reference in a new issue