diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index aa7f6aee1d69..a6b02f6241a7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1319,11 +1319,12 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
 	svm->asid_generation = 0;
 
 	/*
-	 * Workaround: we don't yet track the physical CPU that
-	 * target_vmcb has run on.
+	 * Track the physical CPU the target_vmcb is running on
+	 * in order to mark the VMCB dirty if the cpu changes at
+	 * its next vmrun.
 	 */
 
-	vmcb_mark_all_dirty(svm->vmcb);
+	svm->current_vmcb->cpu = svm->vcpu.cpu;
 }
 
 static int svm_create_vcpu(struct kvm_vcpu *vcpu)
@@ -1499,11 +1500,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-	if (unlikely(cpu != vcpu->cpu)) {
-		svm->asid_generation = 0;
-		vmcb_mark_all_dirty(svm->vmcb);
-	}
-
 	if (sd->current_vmcb != svm->vmcb) {
 		sd->current_vmcb = svm->vmcb;
 		indirect_branch_prediction_barrier();
@@ -3435,6 +3431,17 @@ static void pre_svm_run(struct vcpu_svm *svm)
 {
 	struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu);
 
+	/*
+	 * If the previous vmrun of the vmcb occurred on
+	 * a different physical cpu then we must mark the vmcb dirty.
+	 */
+
+	if (unlikely(svm->current_vmcb->cpu != svm->vcpu.cpu)) {
+		svm->asid_generation = 0;
+		vmcb_mark_all_dirty(svm->vmcb);
+		svm->current_vmcb->cpu = svm->vcpu.cpu;
+	}
+
 	if (sev_guest(svm->vcpu.kvm))
 		return pre_sev_run(svm, svm->vcpu.cpu);
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 818b37388d8c..a37281097751 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -84,6 +84,7 @@ struct kvm_vcpu;
 struct kvm_vmcb_info {
 	struct vmcb *ptr;
 	unsigned long pa;
+	int cpu;
 };
 
 struct svm_nested_state {
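
For reference, a minimal standalone sketch of the idea this patch implements: every VMCB remembers the physical CPU of its previous vmrun (the new kvm_vmcb_info::cpu field), and the pre-run path marks the whole VMCB dirty only when that CPU has changed, rather than svm_vcpu_load() doing so on every vCPU migration. Presumably this keeps the check correct when a vCPU switches between several VMCBs. The toy_* names below are hypothetical stand-ins, not the kernel API, and the real code also resets svm->asid_generation.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel's vmcb/kvm_vmcb_info. */
struct toy_vmcb {
	unsigned int clean_bits;	/* VMCB "clean" bits the CPU may trust */
};

struct toy_vmcb_info {
	struct toy_vmcb *ptr;
	int cpu;			/* physical CPU of the previous vmrun */
};

/* Counterpart of vmcb_mark_all_dirty(): force a full VMCB reload. */
static void toy_mark_all_dirty(struct toy_vmcb *vmcb)
{
	vmcb->clean_bits = 0;
}

/*
 * Counterpart of the check the patch adds to pre_svm_run(): mark the
 * VMCB fully dirty only when it last ran on a different physical CPU.
 */
static void toy_pre_run(struct toy_vmcb_info *info, int cpu)
{
	if (info->cpu != cpu) {
		toy_mark_all_dirty(info->ptr);
		info->cpu = cpu;
	}
}

int main(void)
{
	struct toy_vmcb vmcb = { .clean_bits = 0xfffu };
	struct toy_vmcb_info info = { .ptr = &vmcb, .cpu = 0 };

	toy_pre_run(&info, 0);	/* same CPU: clean bits survive */
	printf("run on cpu 0: clean_bits=0x%x\n", vmcb.clean_bits);

	toy_pre_run(&info, 1);	/* VMCB moved to another CPU: all dirty */
	printf("run on cpu 1: clean_bits=0x%x\n", vmcb.clean_bits);
	return 0;
}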