KVM: add kvm_request_pending
A first step in vcpu->requests encapsulation. Additionally, we now use
READ_ONCE() when accessing vcpu->requests, which ensures we always load
vcpu->requests when it's accessed. This is important as other threads
can change it any time. Also, READ_ONCE() documents that vcpu->requests
is used with other threads, likely requiring memory barriers, which it
does.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
[ Documented the new use of READ_ONCE() and converted another check in
  arch/mips/kvm/vz.c ]
Signed-off-by: Andrew Jones <drjones@redhat.com>
Acked-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
parent 2387149ead
commit 2fa6e1e12a

7 changed files with 13 additions and 9 deletions
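To make the READ_ONCE() point above concrete: without it, the compiler is free
to load vcpu->requests once and reuse the cached value, so a vcpu thread could
keep seeing zero after another thread has set a request bit. Below is a minimal
userspace sketch of the new helper, modeling READ_ONCE() with a volatile access
as the kernel does for scalar types; struct vcpu here is a simplified stand-in
for struct kvm_vcpu, not the kernel's definition:

    #include <stdbool.h>

    /* Simplified stand-in for struct kvm_vcpu; one bit per pending request. */
    struct vcpu {
            unsigned long requests;
    };

    /* Force a fresh load on every access; the compiler may not cache it. */
    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    static inline bool kvm_request_pending(struct vcpu *vcpu)
    {
            return READ_ONCE(vcpu->requests);
    }

Note the bool return: callers only need to know whether any bit is set, and
per-request handling still goes through kvm_check_request(), as the hunks
below show.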
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1094,7 +1094,7 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
         struct mm_struct *mm;
         int i;
 
-        if (likely(!vcpu->requests))
+        if (likely(!kvm_request_pending(vcpu)))
                 return;
 
         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2337,7 +2337,7 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
         int ret = 0;
         int i;
 
-        if (!vcpu->requests)
+        if (!kvm_request_pending(vcpu))
                 return 0;
 
         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -687,7 +687,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
         kvmppc_core_check_exceptions(vcpu);
 
-        if (vcpu->requests) {
+        if (kvm_request_pending(vcpu)) {
                 /* Exception delivery raised request; start over */
                 return 1;
         }
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -55,8 +55,7 @@ EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-        return !!(v->arch.pending_exceptions) ||
-               v->requests;
+        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -108,7 +107,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
          */
         smp_mb();
 
-        if (vcpu->requests) {
+        if (kvm_request_pending(vcpu)) {
                 /* Make sure we process requests preemptable */
                 local_irq_enable();
                 trace_kvm_check_requests(vcpu);
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2440,7 +2440,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
 retry:
         kvm_s390_vcpu_request_handled(vcpu);
-        if (!vcpu->requests)
+        if (!kvm_request_pending(vcpu))
                 return 0;
         /*
          * We use MMU_RELOAD just to re-arm the ipte notifier for the
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6710,7 +6710,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
         bool req_immediate_exit = false;
 
-        if (vcpu->requests) {
+        if (kvm_request_pending(vcpu)) {
                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                         kvm_mmu_unload(vcpu);
                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -6874,7 +6874,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 kvm_x86_ops->sync_pir_to_irr(vcpu);
         }
 
-        if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
+        if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
             || need_resched() || signal_pending(current)) {
                 vcpu->mode = OUTSIDE_GUEST_MODE;
                 smp_wmb();
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1105,6 +1105,11 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
         set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
 }
 
+static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
+{
+        return READ_ONCE(vcpu->requests);
+}
+
 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
 {
         return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
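Taken together, the call sites converge on one pattern: a single READ_ONCE()
gate via kvm_request_pending(), followed by per-bit kvm_check_request() tests
(cf. vcpu_enter_guest above). A self-contained userspace model of that flow,
using simplified non-atomic stand-ins for the kernel helpers, which really use
atomic bitops; KVM_REQUEST_MASK matches the kernel's low-byte mask, but the
request handling here is purely illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define KVM_REQUEST_MASK  0xff          /* strips flag bits from req */
    #define KVM_REQ_TLB_FLUSH 0

    struct vcpu { unsigned long requests; };

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    static bool kvm_request_pending(struct vcpu *vcpu)
    {
            return READ_ONCE(vcpu->requests);
    }

    static void kvm_make_request(int req, struct vcpu *vcpu)
    {
            vcpu->requests |= 1UL << (req & KVM_REQUEST_MASK);
    }

    /* Check-and-clear, mirroring the kernel helper's semantics. */
    static bool kvm_check_request(int req, struct vcpu *vcpu)
    {
            unsigned long bit = 1UL << (req & KVM_REQUEST_MASK);

            if (!(vcpu->requests & bit))
                    return false;
            vcpu->requests &= ~bit;
            return true;
    }

    int main(void)
    {
            struct vcpu vcpu = { 0 };

            kvm_make_request(KVM_REQ_TLB_FLUSH, &vcpu);
            if (kvm_request_pending(&vcpu) &&
                kvm_check_request(KVM_REQ_TLB_FLUSH, &vcpu))
                    puts("TLB flush request handled");
            return 0;
    }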