KVM: x86/mmu: Add helper to generate mask of reserved HPA bits
Add a helper to generate the mask of reserved PA bits in the host.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210204000117.3303214-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 5b7f575ccd
commit 6f8e65a601
1 changed file with 9 additions and 5 deletions
arch/x86/kvm/mmu/mmu.c

@@ -4123,6 +4123,11 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 				    vcpu->arch.reserved_gpa_bits, execonly);
 }
 
+static inline u64 reserved_hpa_bits(void)
+{
+	return rsvd_bits(shadow_phys_bits, 63);
+}
+
 /*
  * the page table on host is the shadow page table for the page
  * table in guest or amd nested guest, its mmu features completely
@@ -4142,7 +4147,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	 */
 	shadow_zero_check = &context->shadow_zero_check;
 	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-				rsvd_bits(shadow_phys_bits, 63),
+				reserved_hpa_bits(),
 				context->shadow_root_level, uses_nx,
 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
 				is_pse(vcpu), true);
@@ -4179,14 +4184,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 
 	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-					rsvd_bits(shadow_phys_bits, 63),
+					reserved_hpa_bits(),
 					context->shadow_root_level, false,
 					boot_cpu_has(X86_FEATURE_GBPAGES),
 					true, true);
 	else
 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
-					    rsvd_bits(shadow_phys_bits, 63),
-					    false);
+					    reserved_hpa_bits(), false);
 
 	if (!shadow_me_mask)
 		return;
@@ -4206,7 +4210,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context, bool execonly)
 {
 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
-				    rsvd_bits(shadow_phys_bits, 63), execonly);
+				    reserved_hpa_bits(), execonly);
 }
 
 #define BYTE_MASK(access) \
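For reference, a minimal standalone sketch of the mask the new helper produces. This is not the kernel code: rsvd_bits() is reimplemented here to mirror its documented behavior (set every bit from s through e, inclusive), and the shadow_phys_bits value of 46 is an arbitrary example standing in for the host's effective MAXPHYADDR.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's rsvd_bits(): a mask with bits s..e set,
 * inclusive.  Writing it as ((2 << (e - s)) - 1) << s keeps the shift
 * count at most 63 even when the range covers all 64 bits.
 */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

/* Example stand-in for shadow_phys_bits (the host's effective MAXPHYADDR). */
static const int shadow_phys_bits = 46;

/* Mirrors reserved_hpa_bits(): any HPA bit at or above MAXPHYADDR is reserved. */
static inline uint64_t reserved_hpa_bits(void)
{
	return rsvd_bits(shadow_phys_bits, 63);
}

int main(void)
{
	/* With MAXPHYADDR = 46 this prints 0xffffc00000000000 (bits 46..63 set). */
	printf("reserved HPA mask: %#llx\n",
	       (unsigned long long)reserved_hpa_bits());
	return 0;
}

The helper simply centralizes this expression so the three call sites in the diff above no longer each repeat rsvd_bits(shadow_phys_bits, 63).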