From e221804dad4e6fe3a0cf192ba3c42cd2f328bdac Mon Sep 17 00:00:00 2001
From: Alexey Kardashevskiy
Date: Wed, 16 Aug 2023 12:21:22 +1000
Subject: [PATCH 1/2] x86/sev: Do not handle #VC for DR7 read/write

With MSR_AMD64_SEV_DEBUG_SWAP enabled, the guest is not expected to
receive a #VC for reads or writes of DR7.

Update the SNP_FEATURES_PRESENT mask with MSR_AMD64_SNP_DEBUG_SWAP so
an SNP guest doesn't gracefully terminate during SNP feature negotiation
if MSR_AMD64_SEV_DEBUG_SWAP is enabled.

Since a guest is not expected to receive a #VC on DR7 accesses when
MSR_AMD64_SEV_DEBUG_SWAP is enabled, return an error from the #VC
handler in this situation.

Signed-off-by: Alexey Kardashevskiy
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Carlos Bilbao
Reviewed-by: Tom Lendacky
Reviewed-by: Pankaj Gupta
Link: https://lore.kernel.org/r/20230816022122.981998-1-aik@amd.com
---
 arch/x86/boot/compressed/sev.c | 2 +-
 arch/x86/kernel/sev.c          | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index c3e343bd4760..e83e71089cf1 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -365,7 +365,7 @@ static void enforce_vmpl0(void)
  * by the guest kernel. As and when a new feature is implemented in the
  * guest kernel, a corresponding bit should be added to the mask.
  */
-#define SNP_FEATURES_PRESENT (0)
+#define SNP_FEATURES_PRESENT MSR_AMD64_SNP_DEBUG_SWAP
 
 void snp_check_features(void)
 {
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 1ee7bed453de..d380c9399480 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1575,6 +1575,9 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
 	long val, *reg = vc_insn_get_rm(ctxt);
 	enum es_result ret;
 
+	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+		return ES_VMM_ERROR;
+
 	if (!reg)
 		return ES_DECODE_FAILED;
 
@@ -1612,6 +1615,9 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
 	long *reg = vc_insn_get_rm(ctxt);
 
+	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+		return ES_VMM_ERROR;
+
 	if (!reg)
 		return ES_DECODE_FAILED;
 
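To make the SNP_FEATURES_PRESENT change above easier to follow, here is a
small, self-contained userspace sketch of the feature-negotiation idea the
commit message describes: the guest compares the feature bits the hypervisor
has enabled against the mask of features the guest kernel implements, and
terminates if anything unsupported is active. This is not the kernel's
snp_check_features(); the constants, bit positions and helper names below are
illustrative only, and the real check in arch/x86/boot/compressed/sev.c is
more involved.

/*
 * Illustrative sketch of SNP feature negotiation (not kernel code).
 * Bit positions and names are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SNP_FEATURE_DEBUG_SWAP		(1ULL << 5)	/* illustrative bit */
#define SNP_FEATURE_SOMETHING_ELSE	(1ULL << 6)	/* illustrative bit */

/*
 * Features this guest kernel knows how to handle; before the patch this
 * mask was empty, so an enabled DEBUG_SWAP forced termination.
 */
#define SNP_FEATURES_PRESENT	SNP_FEATURE_DEBUG_SWAP

static void check_features(uint64_t enabled_features)
{
	/* Anything the hypervisor enabled but the guest cannot handle. */
	uint64_t unsupported = enabled_features & ~SNP_FEATURES_PRESENT;

	if (unsupported) {
		fprintf(stderr, "unsupported SNP features 0x%llx, terminating\n",
			(unsigned long long)unsupported);
		exit(1);	/* stands in for requesting guest termination */
	}
	printf("features 0x%llx accepted\n",
	       (unsigned long long)enabled_features);
}

int main(void)
{
	check_features(SNP_FEATURE_DEBUG_SWAP);		/* accepted after the patch */
	check_features(SNP_FEATURE_SOMETHING_ELSE);	/* still terminates */
	return 0;
}

With DEBUG_SWAP advertised in the mask, a hypervisor that enables the feature
no longer trips the unsupported-feature check; the #VC handler hunks above
then treat any DR7 intercept that still arrives as a VMM error, since the
hardware is expected to handle the debug register swap itself.
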
From ac3f9c9f1b37edaa7d1a9b908bc79d843955a1a2 Mon Sep 17 00:00:00 2001
From: Steve Rutherford
Date: Thu, 24 Aug 2023 15:37:31 -0700
Subject: [PATCH 2/2] x86/sev: Make enc_dec_hypercall() accept a size instead
 of npages

enc_dec_hypercall() accepted a page count instead of a size, which
forced its callers to round up. As a result, non-page aligned vaddrs
caused pages to be spuriously marked as decrypted via the encryption
status hypercall, which in turn caused consistent corruption of pages
during live migration. Live migration requires accurate encryption
status information to avoid migrating pages from the wrong perspective.

Fixes: 064ce6c550a0 ("mm: x86: Invoke hypercall when page encryption status is changed")
Signed-off-by: Steve Rutherford
Signed-off-by: Ingo Molnar
Reviewed-by: Tom Lendacky
Reviewed-by: Pankaj Gupta
Tested-by: Ben Hillier
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20230824223731.2055016-1-srutherford@google.com
---
 arch/x86/include/asm/mem_encrypt.h |  6 +++---
 arch/x86/kernel/kvm.c              |  4 +---
 arch/x86/mm/mem_encrypt_amd.c      | 13 ++++++-------
 3 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 7f97a8a97e24..473b16d73b47 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
 int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
-					    bool enc);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
+					    unsigned long size, bool enc);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
@@ -85,7 +85,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
 static inline int __init
 early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
 static inline void __init
-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1cceac5984da..526d4da3dcd4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -966,10 +966,8 @@ static void __init kvm_init_platform(void)
 		 * Ensure that _bss_decrypted section is marked as decrypted in the
 		 * shared pages list.
 		 */
-		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
-					PAGE_SIZE);
 		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
-						nr_pages, 0);
+						__end_bss_decrypted - __start_bss_decrypted, 0);
 
 		/*
 		 * If not booted using EFI, enable Live migration support.
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 54bbd5163e8d..6faea41e99b6 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
 	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
 }
 
-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
-	unsigned long sz = npages << PAGE_SHIFT;
-	unsigned long vaddr_end = vaddr + sz;
+	unsigned long vaddr_end = vaddr + size;
 
 	while (vaddr < vaddr_end) {
 		int psize, pmask, level;
@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
 		snp_set_memory_private(vaddr, npages);
 
 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
-		enc_dec_hypercall(vaddr, npages, enc);
+		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
 	return true;
 }
@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
-	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+	early_set_mem_enc_dec_hypercall(start, size, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
-	enc_dec_hypercall(vaddr, npages, enc);
+	enc_dec_hypercall(vaddr, size, enc);
 }
 
 void __init sme_early_init(void)
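To see why passing a rounded-up page count corrupted the encryption status,
the following self-contained userspace sketch replays the page walk from
enc_dec_hypercall() with a non-page-aligned vaddr. The helper names and the
example numbers are illustrative and this is not kernel code; only the
bounding logic mirrors the functions changed above. Rounding the size up to
whole pages pushes the end of the walk past the real end of the range, so a
page the caller never touched is reported to the hypervisor as well.

/*
 * Userspace sketch (not kernel code) of the rounding problem fixed above.
 * The walk below mimics the loop in enc_dec_hypercall(), assuming 4K pages
 * and no large-page handling.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Print the base of every page a walk from vaddr to vaddr_end would notify. */
static void walk(const char *tag, unsigned long vaddr, unsigned long vaddr_end)
{
	printf("%s:", tag);
	while (vaddr < vaddr_end) {
		printf(" 0x%lx", vaddr & PAGE_MASK);
		vaddr = (vaddr & PAGE_MASK) + PAGE_SIZE;
	}
	printf("\n");
}

int main(void)
{
	unsigned long vaddr = 0x1800;	/* not page aligned */
	unsigned long size  = 0x800;	/* range ends exactly at 0x2000 */

	/* Old interface: the caller rounds the size up to whole pages. */
	unsigned long npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	walk("npages-based walk", vaddr, vaddr + (npages << PAGE_SHIFT));
	/* -> notifies 0x1000 and 0x2000; the second page is spurious. */

	/* New interface: the exact byte size bounds the walk. */
	walk("size-based walk  ", vaddr, vaddr + size);
	/* -> notifies only 0x1000. */

	return 0;
}

With the exact size passed all the way down, the walk stops at vaddr + size,
so only pages genuinely covered by the range have their encryption status
reported, which is what live migration relies on.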