KVM: x86/mmu: Shove refcounted page dependency into host_pfn_mapping_level()
Move the check that restricts mapping huge pages into the guest to pfns that are backed by refcounted 'struct page' memory into the helper that actually "requires" a 'struct page', host_pfn_mapping_level(). In addition to deduplicating code, moving the check to the helper eliminates the subtle requirement that the caller check that the incoming pfn is backed by a refcounted struct page, and as an added bonus avoids an extra pfn_to_page() lookup.

Note, the is_error_noslot_pfn() check in kvm_mmu_hugepage_adjust() needs to stay where it is, as it guards against dereferencing a NULL memslot in the kvm_slot_dirty_track_enabled() that follows.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220429010416.2788472-11-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent b14b2690c5
commit 5d49f08c2e

2 changed files with 11 additions and 7 deletions
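Before the diff, here is a minimal, self-contained C sketch of the resulting shape of the code. It is an analogue, not the kernel sources: the stub types and the lookup_refcounted_page() helper are invented stand-ins for kvm_pfn_to_refcounted_page() and friends. The point it illustrates is the one from the commit message: the helper that actually needs a 'struct page' now performs the refcounted-page check itself and falls back to PG_LEVEL_4K, so callers no longer pre-filter the pfn.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel types and constants, not KVM's definitions. */
#define PG_LEVEL_4K 1
#define PG_LEVEL_2M 2

struct page {
	bool compound;	/* stands in for PageCompound() */
};

/*
 * Hypothetical analogue of kvm_pfn_to_refcounted_page(): returns NULL when
 * the pfn is not backed by refcounted 'struct page' memory.
 */
static struct page *lookup_refcounted_page(unsigned long pfn)
{
	static struct page huge_backed = { .compound = true };

	return pfn == 0x1000 ? &huge_backed : NULL;
}

/*
 * After the change, the helper that "requires" a struct page does the check
 * itself; non-refcounted pfns simply map at 4K.
 */
static int host_pfn_mapping_level(unsigned long pfn)
{
	struct page *page = lookup_refcounted_page(pfn);

	if (!page || !page->compound)
		return PG_LEVEL_4K;

	return PG_LEVEL_2M;
}

int main(void)
{
	/* Callers no longer pre-check the pfn before asking for the level. */
	printf("pfn 0x1000 maps at level %d\n", host_pfn_mapping_level(0x1000));
	printf("pfn 0x2000 maps at level %d\n", host_pfn_mapping_level(0x2000));
	return 0;
}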
arch/x86/kvm/mmu/mmu.c

@@ -2791,8 +2791,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 				  const struct kvm_memory_slot *slot)
 {
-	struct page *page = pfn_to_page(pfn);
 	int level = PG_LEVEL_4K;
+	struct page *page;
 	unsigned long hva;
 	unsigned long flags;
 	pgd_t pgd;
@@ -2800,6 +2800,14 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 	pud_t pud;
 	pmd_t pmd;
 
+	/*
+	 * Note, @slot must be non-NULL, i.e. the caller is responsible for
+	 * ensuring @pfn isn't garbage and is backed by a memslot.
+	 */
+	page = kvm_pfn_to_refcounted_page(pfn);
+	if (!page)
+		return PG_LEVEL_4K;
+
 	if (!PageCompound(page) && !kvm_is_zone_device_page(page))
 		return PG_LEVEL_4K;
 
@@ -2884,7 +2892,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (unlikely(fault->max_level == PG_LEVEL_4K))
 		return;
 
-	if (is_error_noslot_pfn(fault->pfn) || !kvm_pfn_to_refcounted_page(fault->pfn))
+	if (is_error_noslot_pfn(fault->pfn))
 		return;
 
 	if (kvm_slot_dirty_track_enabled(slot))
@@ -5996,7 +6004,7 @@ restart:
 		 * the guest, and the guest page table is using 4K page size
 		 * mapping if the indirect sp has level = 1.
 		 */
-		if (sp->role.direct && kvm_pfn_to_refcounted_page(pfn) &&
+		if (sp->role.direct &&
 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
 							       pfn, PG_LEVEL_NUM)) {
 			pte_list_remove(kvm, rmap_head, sptep);
arch/x86/kvm/mmu/tdp_mmu.c

@@ -1750,10 +1750,6 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		 * be mapped at a higher level.
 		 */
 		pfn = spte_to_pfn(iter.old_spte);
-
-		if (!kvm_pfn_to_refcounted_page(pfn))
-			continue;
-
 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
 							      iter.gfn, pfn, PG_LEVEL_NUM);
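The other point in the commit message, that the is_error_noslot_pfn() check in kvm_mmu_hugepage_adjust() must stay put, comes down to ordering: the memslot is dereferenced immediately afterwards. Below is a minimal standalone C sketch of that ordering constraint; the struct names and pfn_is_error_or_noslot() are invented stand-ins, chosen only to mirror the flow shown in the mmu.c hunk above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real KVM structures. */
struct memory_slot {
	bool dirty_tracking;
};

struct page_fault {
	unsigned long pfn;
	struct memory_slot *slot;	/* NULL when the pfn has no memslot */
};

/* Hypothetical analogue of is_error_noslot_pfn(); pfn 0 plays the error pfn. */
static bool pfn_is_error_or_noslot(const struct page_fault *fault)
{
	return fault->pfn == 0;
}

static void hugepage_adjust(struct page_fault *fault)
{
	/*
	 * This check must run before the slot dereference below; removing it
	 * would dereference a NULL slot for error/no-slot pfns.
	 */
	if (pfn_is_error_or_noslot(fault))
		return;

	if (fault->slot->dirty_tracking) {
		printf("pfn %#lx: dirty tracking enabled, keep 4K\n", fault->pfn);
		return;
	}

	printf("pfn %#lx: eligible for a huge mapping\n", fault->pfn);
}

int main(void)
{
	struct memory_slot slot = { .dirty_tracking = false };
	struct page_fault ok = { .pfn = 0x1000, .slot = &slot };
	struct page_fault bad = { .pfn = 0, .slot = NULL };

	hugepage_adjust(&ok);
	hugepage_adjust(&bad);	/* bails out early instead of crashing */
	return 0;
}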