KVM: x86/mmu: Batch zap MMU pages when recycling oldest pages
Collect MMU pages for zapping in a loop when making MMU pages available,
and skip over active roots when doing so as zapping an active root can
never immediately free up a page. Batching the zapping avoids multiple
remote TLB flushes and remedies the issue where the loop would bail
early if an active root was encountered.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200623193542.7554-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 6b82ef2c9c
parent f95eec9bed

1 changed file with 39 additions and 13 deletions
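The changelog's point about TLB flushes can be seen in a minimal userspace C sketch of the batching pattern (illustrative only: struct page, commit_zap() and the flush counter are invented stand-ins, not KVM's API). Victims are unlinked onto a local invalid list, active roots are skipped, and the whole batch is committed at once, so one simulated remote flush covers every page zapped in the batch.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for KVM's structures; illustrative only. */
struct page {
	int id;
	bool is_root;		/* active roots are skipped, as in the patch */
	struct page *next;
};

static int tlb_flushes;		/* counts simulated remote TLB flushes */

/* One batch commit == one simulated remote TLB flush. */
static void commit_zap(struct page **invalid_list)
{
	tlb_flushes++;
	*invalid_list = NULL;	/* the real code frees the pages here */
}

/*
 * Batched zap: walk the active list oldest-first, skip roots, and
 * collect up to nr_to_zap victims before a single commit.
 */
static unsigned long zap_oldest(struct page **active, unsigned long nr_to_zap)
{
	struct page *invalid_list = NULL;
	struct page **pp = active;
	unsigned long zapped = 0;

	while (*pp && zapped < nr_to_zap) {
		struct page *sp = *pp;

		if (sp->is_root) {		/* can't free a live root */
			pp = &sp->next;
			continue;
		}
		*pp = sp->next;			/* unlink from the active list */
		sp->next = invalid_list;	/* defer freeing to the commit */
		invalid_list = sp;
		zapped++;
	}
	if (invalid_list)
		commit_zap(&invalid_list);
	return zapped;
}

int main(void)
{
	struct page c = { 3, false, NULL };
	struct page b = { 2, true, &c };	/* a "root": skipped, not zapped */
	struct page a = { 1, false, &b };
	struct page *active = &a;

	printf("zapped %lu page(s), %d remote TLB flush(es)\n",
	       zap_oldest(&active, 2), tlb_flushes);	/* 2 pages, 1 flush */
	return 0;
}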
arch/x86/kvm/mmu/mmu.c

@@ -2829,20 +2829,51 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
 	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 }
 
-static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
+static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
+						  unsigned long nr_to_zap)
 {
+	unsigned long total_zapped = 0;
+	struct kvm_mmu_page *sp, *tmp;
 	LIST_HEAD(invalid_list);
+	bool unstable;
+	int nr_zapped;
 
-	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
+	if (list_empty(&kvm->arch.active_mmu_pages))
 		return 0;
 
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
-		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+restart:
+	list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
+		/*
+		 * Don't zap active root pages, the page itself can't be freed
+		 * and zapping it will just force vCPUs to realloc and reload.
+		 */
+		if (sp->root_count)
+			continue;
+
+		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
+						      &nr_zapped);
+		total_zapped += nr_zapped;
+		if (total_zapped >= nr_to_zap)
 			break;
 
-		++vcpu->kvm->stat.mmu_recycled;
+		if (unstable)
+			goto restart;
 	}
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
+	kvm->stat.mmu_recycled += total_zapped;
+	return total_zapped;
+}
+
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
+{
+	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
+
+	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
+		return 0;
+
+	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
 
 	if (!kvm_mmu_available_pages(vcpu->kvm))
 		return -ENOSPC;
@@ -2855,17 +2886,12 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
-	LIST_HEAD(invalid_list);
-
 	spin_lock(&kvm->mmu_lock);
 
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		/* Need to free some mmu pages to achieve the goal. */
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
-			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
-				break;
+		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
+						  goal_nr_mmu_pages);
 
-		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
 
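The reworked make_mmu_pages_available() computes the deficit once and issues a single sized zap request instead of re-testing availability on every pass of the old while loop. A small sketch of that watermark arithmetic, assuming the constant values from arch/x86/include/asm/kvm_host.h at the time (KVM_MIN_FREE_MMU_PAGES = 5, KVM_REFILL_PAGES = 25; treat the exact values as an assumption and check your tree):

#include <stdio.h>

/* Assumed values; see arch/x86/include/asm/kvm_host.h in your tree. */
#define KVM_MIN_FREE_MMU_PAGES 5UL
#define KVM_REFILL_PAGES       25UL

/* Size of the zap request for a given number of available MMU pages. */
static unsigned long nr_to_zap(unsigned long avail)
{
	if (avail >= KVM_MIN_FREE_MMU_PAGES)
		return 0;			/* enough headroom, do nothing */
	return KVM_REFILL_PAGES - avail;	/* refill to the high watermark */
}

int main(void)
{
	printf("avail=3  -> zap %lu pages\n", nr_to_zap(3));	/* prints 22 */
	printf("avail=10 -> zap %lu pages\n", nr_to_zap(10));	/* prints 0 */
	return 0;
}

The gap between the two watermarks provides hysteresis: once a vCPU dips below the low watermark, one batch restores enough headroom that subsequent faults do not trigger a zap on every iteration.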