mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-03-16 12:14:06 +00:00
KVM: x86: make KVM_REQ_NMI request iff NMI pending for vcpu
commit 6231c9e1a9f35b535c66709aa8a6eda40dbc4132 upstream.
kvm_vcpu_ioctl_x86_set_vcpu_events() routine makes 'KVM_REQ_NMI'
request for a vcpu even when its 'events->nmi.pending' is zero.
Ex:
qemu_thread_start
kvm_vcpu_thread_fn
qemu_wait_io_event
qemu_wait_io_event_common
process_queued_cpu_work
do_kvm_cpu_synchronize_post_init/_reset
kvm_arch_put_registers
kvm_put_vcpu_events (cpu, level=[2|3])
This leads vCPU threads in QEMU to constantly acquire & release the
global mutex lock, delaying the guest boot due to lock contention.
Add a check so that the KVM_REQ_NMI request is made only if the vcpu has an NMI pending.
Fixes: bdedff2631
("KVM: x86: Route pending NMIs from userspace through process_nmi()")
Cc: stable@vger.kernel.org
Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>
Link: https://lore.kernel.org/r/20240103075343.549293-1-ppandit@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
b2479ab426
commit
eea9b2e0d2
1 changed file with 2 additions and 1 deletion
|
@ -5300,7 +5300,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
|
|||
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
|
||||
vcpu->arch.nmi_pending = 0;
|
||||
atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
|
||||
kvm_make_request(KVM_REQ_NMI, vcpu);
|
||||
if (events->nmi.pending)
|
||||
kvm_make_request(KVM_REQ_NMI, vcpu);
|
||||
}
|
||||
static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue