KVM: x86/mmu: Don't put invalid SPs back on the list of active pages
Delete a shadow page from the invalidation list instead of throwing it
back on the list of active pages when it's a root shadow page with
active users.  Invalid active root pages will be explicitly freed by
mmu_free_root_page() when the root_count hits zero, i.e. they don't
need to be put on the active list to avoid leakage.

Use sp->role.invalid to detect that a shadow page has already been
zapped, i.e. is not on a list.

WARN if an invalid page is encountered when zapping pages, as it should
now be impossible.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200623193542.7554-2-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fb58a9c345
commit f95eec9bed

1 changed file with 20 additions and 8 deletions
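The subtlety in the first hunk is the list_add() vs. list_move() split: list_move() unlinks the entry before re-adding it, and the kernel's list_del() poisons the entry's next/prev pointers, so calling list_move() on a page that was already taken off the active list would chase poison values. Below is a minimal userspace sketch of that hazard; it is not <linux/list.h>, just a cut-down imitation with kernel-style poisoning.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_POISON1 ((struct list_head *)0x100)
#define LIST_POISON2 ((struct list_head *)0x122)

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	entry->next = LIST_POISON1;	/* stale reuse now faults loudly */
	entry->prev = LIST_POISON2;
}

/* list_move() unlinks first, i.e. it dereferences entry->next/prev. */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

int main(void)
{
	struct list_head active  = { &active, &active };
	struct list_head invalid = { &invalid, &invalid };
	struct list_head sp_link;

	list_add(&sp_link, &active);	/* page on the active list          */
	list_del(&sp_link);		/* "else" branch: root still in use */

	/* The root count later hits zero and the page is zapped again:    */
	list_add(&sp_link, &invalid);	/* safe: writes only entry and head */
	/* list_move(&sp_link, &invalid) would have chased LIST_POISON1.   */

	printf("sp_link reattached: %d\n", invalid.next == &sp_link);
	return 0;
}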
arch/x86/kvm/mmu/mmu.c

@@ -2748,10 +2748,23 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	if (!sp->root_count) {
 		/* Count self */
 		(*nr_zapped)++;
-		list_move(&sp->link, invalid_list);
+
+		/*
+		 * Already invalid pages (previously active roots) are not on
+		 * the active page list.  See list_del() in the "else" case of
+		 * !sp->root_count.
+		 */
+		if (sp->role.invalid)
+			list_add(&sp->link, invalid_list);
+		else
+			list_move(&sp->link, invalid_list);
 		kvm_mod_used_mmu_pages(kvm, -1);
 	} else {
-		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+		/*
+		 * Remove the active root from the active page list, the root
+		 * will be explicitly freed when the root_count hits zero.
+		 */
+		list_del(&sp->link);
 
 		/*
 		 * Obsolete pages cannot be used on any vCPUs, see the comment
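The "else" branch pairs with that: a root with active users is marked invalid and unlinked, but its memory is only released once the last user drops it, via mmu_free_root_page() per the commit message. Here is a stripped-down sketch of that lifecycle; struct shadow_page, prepare_zap() and root_put() are simplified stand-ins, not the kernel's kvm_mmu_page machinery.

#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

static void list_del(struct list_head *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

struct shadow_page {
	struct list_head link;
	int root_count;		/* active users of this root */
	int invalid;		/* mirrors sp->role.invalid  */
};

/* Zap: an idle page joins invalid_list; a live root is only unlinked. */
static void prepare_zap(struct shadow_page *sp, struct list_head *invalid_list)
{
	list_del(&sp->link);
	sp->invalid = 1;
	if (!sp->root_count)
		list_add(&sp->link, invalid_list);	/* freed in batch */
	/* else: off every list, freed when the last user lets go */
}

/* Plays the role of mmu_free_root_page(): the last user frees the page. */
static void root_put(struct shadow_page *sp)
{
	if (--sp->root_count == 0 && sp->invalid)
		free(sp);
}

int main(void)
{
	struct list_head active  = { &active, &active };
	struct list_head invalid = { &invalid, &invalid };
	struct shadow_page *sp = calloc(1, sizeof(*sp));

	sp->root_count = 1;		/* root currently in use */
	list_add(&sp->link, &active);

	prepare_zap(sp, &invalid);	/* unlinked, not yet freed */
	root_put(sp);			/* last user gone: freed   */
	return 0;
}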
@@ -5718,12 +5731,11 @@ restart:
 			break;
 
 		/*
-		 * Skip invalid pages with a non-zero root count, zapping pages
-		 * with a non-zero root count will never succeed, i.e. the page
-		 * will get thrown back on active_mmu_pages and we'll get stuck
-		 * in an infinite loop.
+		 * Invalid pages should never land back on the list of active
+		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
+		 * infinite loop if the page gets put back on the list (again).
 		 */
-		if (sp->role.invalid && sp->root_count)
+		if (WARN_ON(sp->role.invalid))
			continue;
 
 		/*
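The replacement check leans on a property of the kernel's WARN_ON() (from include/asm-generic/bug.h): it evaluates its condition, emits a warning with a stack trace when the condition is true, and yields the condition's value, so it can gate control flow in a single expression. A rough userspace approximation, using the same GCC/Clang statement-expression extension the kernel relies on:

#include <stdio.h>

#define WARN_ON(cond) ({						\
	int __ret_warn_on = !!(cond);					\
	if (__ret_warn_on)						\
		fprintf(stderr, "WARNING: %s:%d: %s\n",			\
			__FILE__, __LINE__, #cond);			\
	__ret_warn_on;							\
})

int main(void)
{
	int invalid = 1;

	/* Warns and skips in one go, like "if (WARN_ON(...)) continue;" */
	for (int i = 0; i < 2; i++) {
		if (WARN_ON(invalid))
			continue;
		puts("unreachable while invalid is set");
	}
	return 0;
}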
@@ -6001,7 +6013,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (sp->role.invalid && sp->root_count)
+		if (WARN_ON(sp->role.invalid))
 			continue;
 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
 			goto restart;
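Both call sites sit in restart-style walks: __kvm_mmu_prepare_zap_page() can unlink pages other than the one in hand, so the "next" pointer cached by list_for_each_entry_safe() may go stale and the loop must rescan from the head. That is also why a page that kept reappearing on active_mmu_pages could previously spin such a walk forever. A self-contained sketch of the restart pattern follows; the names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page { struct list_head link; };

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

static void list_del(struct list_head *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

/* In the real code, zapping may unlink pages beyond @p; returning true
 * tells the caller that any cached iterator can no longer be trusted. */
static bool zap(struct page *p)
{
	list_del(&p->link);
	free(p);
	return true;
}

static void zap_all(struct list_head *head)
{
restart:
	for (struct list_head *pos = head->next, *n = pos->next;
	     pos != head; pos = n, n = pos->next) {
		if (zap(container_of(pos, struct page, link)))
			goto restart;	/* cached @n may be stale */
	}
}

int main(void)
{
	struct list_head active = { &active, &active };

	for (int i = 0; i < 3; i++) {
		struct page *p = calloc(1, sizeof(*p));
		list_add(&p->link, &active);
	}
	zap_all(&active);	/* drains the list, restarting per zap */
	return 0;
}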