From e834df6cfc71d8e5ce2c27a0184145ea125c3f0f Mon Sep 17 00:00:00 2001
From: Nathan Lynch
Date: Mon, 15 Mar 2021 03:00:44 -0500
Subject: [PATCH 1/3] powerpc/pseries/mobility: use struct for shared state

The atomic_t counter is the only shared state for the join/suspend
sequence so far, but that will change. Contain it in a struct
(pseries_suspend_info), and document its intended use. No functional
change.

Signed-off-by: Nathan Lynch
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210315080045.460331-2-nathanl@linux.ibm.com
---
 arch/powerpc/platforms/pseries/mobility.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index ea4d6a660e0d..a6739ce9feac 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -452,9 +452,21 @@ static int do_suspend(void)
 	return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ */
+struct pseries_suspend_info {
+	atomic_t counter;
+};
+
 static int do_join(void *arg)
 {
-	atomic_t *counter = arg;
+	struct pseries_suspend_info *info = arg;
+	atomic_t *counter = &info->counter;
 	long hvrc;
 	int ret;
 
@@ -535,11 +547,15 @@ static int pseries_suspend(u64 handle)
 	int ret;
 
 	while (true) {
-		atomic_t counter = ATOMIC_INIT(0);
+		struct pseries_suspend_info info;
 		unsigned long vasi_state;
 		int vasi_err;
 
-		ret = stop_machine(do_join, &counter, cpu_online_mask);
+		info = (struct pseries_suspend_info) {
+			.counter = ATOMIC_INIT(0),
+		};
+
+		ret = stop_machine(do_join, &info, cpu_online_mask);
 		if (ret == 0)
 			break;
 		/*
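
A note on the shape of this change: stop_machine(fn, data, cpus) runs
fn(data) on every CPU in cpus, so the single void *data pointer is the
only channel for state shared across CPUs, and a struct is the natural
container once more than one field is needed. The next patch adds a
second field, which is why this indirection lands first as a
no-functional-change step. Below is a minimal userspace sketch of the
same pattern, with pthreads standing in for the per-CPU threads; every
name in it is invented for illustration, none of it is kernel code.

/*
 * Illustrative analogue of patch 1: all state shared by the per-CPU
 * callbacks travels through one struct, so later fields (like the
 * 'done' flag added in patch 2) never change the callback signature.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct suspend_info {
	atomic_int counter;	/* stands in for the kernel's atomic_t */
};

/* Same int (*)(void *) shape as the kernel's cpu_stop_fn_t. */
static int do_join_cb(void *arg)
{
	struct suspend_info *info = arg;

	/* The first incrementer (0 -> 1) would wake the other threads. */
	if (atomic_fetch_add(&info->counter, 1) == 0)
		printf("first thread in, would wake the others\n");
	return 0;
}

static void *thread_main(void *arg)
{
	do_join_cb(arg);
	return NULL;
}

int main(void)
{
	struct suspend_info info = { .counter = 0 };
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, thread_main, &info);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("counter = %d\n", atomic_load(&info.counter));
	return 0;
}

Because the callback keeps the int (*)(void *) shape throughout,
growing the struct never touches the stop_machine() call site.
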
From 274cb1ca2e7ce02cab56f5f4c61a74aeb566f931 Mon Sep 17 00:00:00 2001
From: Nathan Lynch
Date: Mon, 15 Mar 2021 03:00:45 -0500
Subject: [PATCH 2/3] powerpc/pseries/mobility: handle premature return from
 H_JOIN

The pseries join/suspend sequence in its current form was written with
the assumption that it was the only user of H_PROD and that it needn't
handle spurious successful returns from H_JOIN. That's wrong; powerpc's
paravirt spinlock code uses H_PROD, and CPUs entering do_join() can be
woken prematurely from H_JOIN with a status of H_SUCCESS as a result.
This causes all CPUs to exit the sequence early, preventing suspend
from occurring at all.

Add a 'done' boolean flag to the pseries_suspend_info struct, and have
the waking thread set it before waking the other threads. Threads which
receive H_SUCCESS from H_JOIN retry if the 'done' flag is still unset.

Fixes: 9327dc0aeef3 ("powerpc/pseries/mobility: use stop_machine for join/suspend")
Signed-off-by: Nathan Lynch
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210315080045.460331-3-nathanl@linux.ibm.com
---
 arch/powerpc/platforms/pseries/mobility.c | 26 ++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index a6739ce9feac..e83e0891272d 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -458,9 +458,12 @@ static int do_suspend(void)
  *           or if an error is received from H_JOIN. The thread which performs
  *           the first increment (i.e. sets it to 1) is responsible for
  *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
  */
 struct pseries_suspend_info {
 	atomic_t counter;
+	bool done;
 };
 
 static int do_join(void *arg)
@@ -470,6 +473,7 @@ static int do_join(void *arg)
 	long hvrc;
 	int ret;
 
+retry:
 	/* Must ensure MSR.EE off for H_JOIN. */
 	hard_irq_disable();
 	hvrc = plpar_hcall_norets(H_JOIN);
@@ -485,8 +489,20 @@ static int do_join(void *arg)
 	case H_SUCCESS:
 		/*
 		 * The suspend is complete and this cpu has received a
-		 * prod.
+		 * prod, or we've received a stray prod from unrelated
+		 * code (e.g. paravirt spinlocks) and we need to join
+		 * again.
+		 *
+		 * This barrier orders the return from H_JOIN above vs
+		 * the load of info->done. It pairs with the barrier
+		 * in the wakeup/prod path below.
 		 */
+		smp_mb();
+		if (READ_ONCE(info->done) == false) {
+			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+					    smp_processor_id());
+			goto retry;
+		}
 		ret = 0;
 		break;
 	case H_BAD_MODE:
@@ -500,6 +516,13 @@ static int do_join(void *arg)
 
 	if (atomic_inc_return(counter) == 1) {
 		pr_info("CPU %u waking all threads\n", smp_processor_id());
+		WRITE_ONCE(info->done, true);
+		/*
+		 * This barrier orders the store to info->done vs subsequent
+		 * H_PRODs to wake the other CPUs. It pairs with the barrier
+		 * in the H_SUCCESS case above.
+		 */
+		smp_mb();
 		prod_others();
 	}
 	/*
@@ -553,6 +576,7 @@ static int pseries_suspend(u64 handle)
 
 		info = (struct pseries_suspend_info) {
 			.counter = ATOMIC_INIT(0),
+			.done = false,
 		};
 
 		ret = stop_machine(do_join, &info, cpu_online_mask);
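
The two new barrier comments describe a publish/consume pairing that
can be restated outside the kernel: the waker must make its store to
info->done visible before any prod that can wake another CPU, and a
woken CPU must order its wakeup against the subsequent load of
info->done, rejoining if the flag is still false. Below is a hedged
C11 sketch of that protocol, with seq_cst fences standing in for
smp_mb() and an atomic flag standing in for H_JOIN/H_PROD; all names
are invented, none of this is kernel API.

/*
 * Userspace sketch of the patch 2 protocol: a "prod" can be stray, so
 * a woken thread re-checks 'done' (with ordering) and re-joins if it
 * is still false. Hypothetical names throughout.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool prod;	/* models receiving an H_PROD */
static atomic_bool done;	/* models info->done */

static void *waker(void *arg)
{
	(void)arg;
	/* A stray prod first, as paravirt spinlocks could deliver. */
	atomic_store(&prod, true);

	atomic_store_explicit(&done, true, memory_order_relaxed);
	/* Pairs with the joiner's fence: publish done before the real prod. */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store(&prod, true);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
retry:
	/* "H_JOIN": busy-wait until a prod arrives, then consume it. */
	while (!atomic_exchange(&prod, false))
		;
	/* Pairs with the waker's fence: order wakeup vs the load of done. */
	atomic_thread_fence(memory_order_seq_cst);
	if (!atomic_load_explicit(&done, memory_order_relaxed)) {
		puts("stray prod, joining again");
		goto retry;
	}
	puts("suspend complete");
	pthread_join(t, NULL);
	return 0;
}

Run enough times, the sketch occasionally takes the "stray prod" path
first; without the re-check it would report completion while done is
still false, which mirrors the failure mode this patch fixes.
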
From 53f1d31708f6240e4615b0927df31f182e389e2f Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Fri, 26 Mar 2021 12:37:55 +0530
Subject: [PATCH 3/3] powerpc/mm/book3s64: Use the correct storage key value
 when calling H_PROTECT

H_PROTECT expects the flag value to include:
AVPN, pp0, pp1, pp2, key0-key4, Noexec, CMO Option flags

This patch updates hpte_updatepp() to fetch the storage key value from
the Linux page table and use it in the H_PROTECT hcall.

native_hpte_updatepp() is not updated because the kernel doesn't clear
the existing storage key value there. The kernel also doesn't use the
hpte_updatepp() callback for updating storage keys.

This fixes the below kernel crash observed with KUAP enabled:

BUG: Unable to handle kernel data access on write at 0xc009fffffc440000
Faulting instruction address: 0xc0000000000b7030
Key fault AMR: 0xfcffffffffffffff IAMR: 0xc0000077bc498100
Found HPTE: v = 0x40070adbb6fffc05 r = 0x1ffffffffff1194
Oops: Kernel access of bad area, sig: 11 [#1]
LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
...
CFAR: c000000000010100 DAR: c009fffffc440000 DSISR: 02200000 IRQMASK: 0
...
NIP memset+0x68/0x104
LR pcpu_alloc+0x54c/0xb50
Call Trace:
  pcpu_alloc+0x55c/0xb50 (unreliable)
  blk_stat_alloc_callback+0x94/0x150
  blk_mq_init_allocated_queue+0x64/0x560
  blk_mq_init_queue+0x54/0xb0
  scsi_mq_alloc_queue+0x30/0xa0
  scsi_alloc_sdev+0x1cc/0x300
  scsi_probe_and_add_lun+0xb50/0x1020
  __scsi_scan_target+0x17c/0x790
  scsi_scan_channel+0x90/0xe0
  scsi_scan_host_selected+0x148/0x1f0
  do_scan_async+0x2c/0x2a0
  async_run_entry_fn+0x78/0x220
  process_one_work+0x264/0x540
  worker_thread+0xa8/0x600
  kthread+0x190/0x1a0
  ret_from_kernel_thread+0x5c/0x6c

With KUAP enabled the kernel uses storage key 3 for all its
translations. But as shown by the debug print, in this specific case
the hash page table entry was created with key value 0:

Found HPTE: v = 0x40070adbb6fffc05 r = 0x1ffffffffff1194

and DSISR indicates a key fault.

This can happen due to parallel faults on the same EA by different
CPUs:

	CPU 0				CPU 1
	fault on X
	H_PAGE_BUSY set
					fault on X
	finish fault handling and
	clear H_PAGE_BUSY
					check for H_PAGE_BUSY
					continue with fault handling

This implies CPU 1 ends up calling hpte_updatepp() for address X, and
the kernel updates the hash PTE entry with key 0.

Fixes: d94b827e89dc ("powerpc/book3s64/kuap: Use Key 3 for kernel mapping with hash translation")
Reported-by: Murilo Opsfelder Araujo
Signed-off-by: Aneesh Kumar K.V
Debugged-by: Michael Ellerman
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210326070755.304625-1-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/platforms/pseries/lpar.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 764170fdb0f7..3805519a6469 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-	flags = (newpp & 7) | H_AVPN;
+	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
 		/* Move pp0 into bit 8 (IBM 55) */
 		flags |= (newpp & HPTE_R_PP0) >> 55;
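
To make the flags arithmetic concrete, the sketch below evaluates the
old and new expressions for a newpp that carries a nonzero storage
key. The mask values are quoted from
arch/powerpc/include/asm/book3s/64/mmu-hash.h and hvcall.h as I read
them; treat them as assumptions and verify against your tree.

/*
 * Standalone illustration of the hpte_updatepp() flags fix. Mask
 * values are assumed copies of the kernel definitions, not verified
 * against any particular tree.
 */
#include <inttypes.h>
#include <stdio.h>

#define HPTE_R_PP0     UINT64_C(0x8000000000000000)
#define HPTE_R_KEY_HI  UINT64_C(0x3000000000000000)
#define HPTE_R_KEY_LO  UINT64_C(0x0000000000000e00)
#define HPTE_R_N       UINT64_C(0x0000000000000004)
#define HPTE_R_PP      UINT64_C(0x0000000000000003)
#define H_AVPN         (UINT64_C(1) << (63 - 32))

int main(void)
{
	/*
	 * pp=2 plus storage key 3; the low key bits are placed in the
	 * KEY_LO field (3 << 9), assuming the layout the masks suggest.
	 */
	uint64_t newpp = UINT64_C(0x2) | (UINT64_C(3) << 9);
	uint64_t old_flags, new_flags;

	/* Before the fix: only the PP and N bits survived the mask. */
	old_flags = (newpp & 7) | H_AVPN;

	/* After the fix: keep PP, N and KEY_LO; fold KEY_HI down by 48. */
	new_flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
	new_flags |= (newpp & HPTE_R_KEY_HI) >> 48;

	printf("old flags: 0x%" PRIx64 " (key bits lost)\n", old_flags);
	printf("new flags: 0x%" PRIx64 " (key bits preserved)\n", new_flags);
	return 0;
}

The old expression masked newpp with 7, so the key bits never reached
H_PROTECT and the hypervisor installed the entry with key 0, matching
the "Found HPTE" debug print in the commit message above.
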