From a17b9e4c9c5e9c4da4385908af0377af11529266 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 18 Oct 2016 13:42:09 +0800 Subject: [PATCH 01/39] clk: sunxi-ng: sun6i-a31: Force AHB1 clock to use PLL6 as parent On the A31, the DMA engine only works if AHB1 is clocked from PLL6. In addition, the hstimer is clocked from AHB1, and if AHB1 is clocked from the CPU clock, and cpufreq is working, we get an unstable timer. Force the AHB1 clock to use PLL6 as its parent. Previously this was done in the device tree with the assigned-clocks and assigned-clocks-parent bindings. However with this new monolithic driver, the system critical clocks aren't exported through the device tree. The alternative is to force this setting in the driver before the clocks are registered. This is also done in newer versions of mainline U-boot. But people still using an older version, or even the vendor version, can still hit this issue. Hence the need to do it in the kernel as well. Reported-by: Hans de Goede Reported-by: Maxime Ripard Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 79596463e0d9..4a82a49cff5e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = { static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", 0x050, 0, 3, axi_div_table, 0); +#define SUN6I_A31_AHB1_REG 0x054 + static const char * const ahb1_parents[] = { "osc32k", "osc24M", "axi", "pll-periph" }; @@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node) val &= BIT(16); writel(val, reg + SUN6I_A31_PLL_MIPI_REG); + /* Force AHB1 to PLL6 / 3 */ + val = readl(reg + SUN6I_A31_AHB1_REG); + /* set PLL6 pre-div = 3 */ + val &= ~GENMASK(7, 6); + val |= 0x2 << 6; + /* select PLL6 / pre-div */ + val &= ~GENMASK(13, 12); + val |= 0x3 << 12; + writel(val, reg + SUN6I_A31_AHB1_REG); + sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, From ac95330b96376550ae7a533d1396272d675adfa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phan=20Rafin?= Date: Fri, 4 Nov 2016 00:53:56 +0100 Subject: [PATCH 02/39] clk: sunxi: Fix M factor computation for APB1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit cfa636886033 ("clk: sunxi: factors: Consolidate get_factors parameters into a struct") introduced a regression for m factor computation in sun4i_get_apb1_factors function. The old code reassigned the "parent_rate" parameter to the targeted divisor value and was buggy for the returned frequency but not for the computed factors. Now, returned frequency is good but m factor is incorrectly computed (its max value 31 is always set resulting in a significantly slower frequency than the requested one...) This patch simply restores the original proper computation for m while keeping the good changes for returned rate. 
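[ Editorial illustration, not part of the patch; the rates below are assumed example values. With parent_rate = 24 MHz and a requested 6 MHz, the target divisor is div = 4, and the difference between the regressed and the restored m computation is: ]

	/* Simplified from sun4i_get_apb1_factors(); example values assumed. */
	unsigned long parent_rate = 24000000;		/* 24 MHz parent */
	unsigned long rate = 6000000;			/* requested 6 MHz */
	unsigned long div = parent_rate / rate;		/* target divisor: 4 */
	u8 calcp = 0;					/* power-of-two prescaler picked earlier */

	unsigned long bad  = (parent_rate >> calcp) - 1;	/* regression: huge value, so the
								   5-bit m field saturates at 31 */
	unsigned long good = (div >> calcp) - 1;		/* fix: m = 3, divide-by-4 as intended */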
Fixes: cfa636886033 ("clk: sunxi: factors: Consolidate get_factors parameters into a struct") Signed-off-by: Stéphan Rafin Signed-off-by: Maxime Ripard --- drivers/clk/sunxi/clk-sunxi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 838b22aa8b67..f2c9274b8bd5 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req) else calcp = 3; - calcm = (req->parent_rate >> calcp) - 1; + calcm = (div >> calcp) - 1; req->rate = (req->parent_rate >> calcp) / (calcm + 1); req->m = calcm; From 18f6084a989ba1b38702f9af37a2e4049a924be6 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 10 Nov 2016 09:35:27 -0500 Subject: [PATCH 03/39] scsi: mpt3sas: Fix secure erase premature termination This is a workaround for a bug with LSI Fusion MPT SAS2 when performing a secure erase. Due to the very long time the operation takes, commands issued during the erase will time out and will trigger execution of the abort hook. Even though the abort hook is called for the specific command which timed out, this leads to an entire device halt (scsi_state terminated) and premature termination of the secure erase. Set the device state to busy while ATA passthrough commands are in progress. [mkp: hand applied to 4.9/scsi-fixes, tweaked patch description] Signed-off-by: Andrey Grodzovsky Acked-by: Sreekanth Reddy Cc: Cc: Sathya Prakash Cc: Chaitra P B Cc: Suganath Prabu Subramani Cc: Sreekanth Reddy Cc: Hannes Reinecke Cc: Signed-off-by: Martin K. Petersen --- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8aa769a2d919..91b70bc46e7f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) SAM_STAT_CHECK_CONDITION; } - +static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +{ + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); +} /** * scsih_qcmd - main scsi request entry point @@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); + /* + * Lock the device for any subsequent command until command is + * done.
+ */ + if (ata_12_16_cmd(scmd)) + scsi_internal_device_block(scmd->device); + sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { From c733ab3512431436a26e0381829b45794cb13fb0 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 14 Nov 2016 19:26:22 -0200 Subject: [PATCH 04/39] scsi: qla2xxx: do not abort all commands in the adapter during EEH recovery The previous commit 1535aa75a3d8 ("qla2xxx: fix invalid DMA access after command aborts in PCI device remove") introduced a regression during an EEH recovery, since the change to the qla2x00_abort_all_cmds() function calls qla2xxx_eh_abort(), which verifies the EEH recovery condition but handles it heavy-handed. (commit a465537ad1a4 "qla2xxx: Disable the adapter and skip error recovery in case of register disconnect.") This problem warrants a more general/optimistic solution right into qla2xxx_eh_abort() (eg in case a real command abort arrives during EEH recovery, or if it takes long enough to trigger command aborts); but it's still worth to add a check to ensure the code added by the previous commit is correct and contained within its owner function. This commit just adds a 'if (!ha->flags.eeh_busy)' check around it. (ahem; a trivial fix for this -rc series; sorry for this oversight.) With it applied, both PCI device remove and EEH recovery works fine. Fixes: 1535aa75a3d8 ("scsi: qla2xxx: fix invalid DMA access after command aborts in PCI device remove") Signed-off-by: Mauricio Faria de Oliveira Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_os.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 567fa080e261..56d6142852a5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - /* Get a reference to the sp and drop the lock. - * The reference ensures this sp->done() call - * - and not the call in qla2xxx_eh_abort() - - * ends the SCSI command (with result 'res'). + /* Don't abort commands in adapter during EEH + * recovery as it's not accessible/responding. */ - sp_get(sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - qla2xxx_eh_abort(GET_CMD_SP(sp)); - spin_lock_irqsave(&ha->hardware_lock, flags); + if (!ha->flags.eeh_busy) { + /* Get a reference to the sp and drop the lock. + * The reference ensures this sp->done() call + * - and not the call in qla2xxx_eh_abort() - + * ends the SCSI command (with result 'res'). 
+ */ + sp_get(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2xxx_eh_abort(GET_CMD_SP(sp)); + spin_lock_irqsave(&ha->hardware_lock, flags); + } req->outstanding_cmds[cnt] = NULL; sp->done(vha, sp, res); } From 864c2357ca898c6171fe5284f5ecc795c8ce27a8 Mon Sep 17 00:00:00 2001 From: David Carrillo-Cisneros Date: Tue, 1 Nov 2016 11:52:58 -0700 Subject: [PATCH 05/39] perf/core: Do not set cpuctx->cgrp for unscheduled cgroups Commit: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events") failed to verify that event->cgrp is actually the scheduled cgroup in a CPU before setting cpuctx->cgrp. This patch fixes that. Now that there is a different path for scheduled and unscheduled cgroups, add a warning to catch when cpuctx->cgrp is still set after the last cgroup event has been unscheduled. To verify the bug: # Create 2 cgroups. mkdir /dev/cgroups/devices/g1 mkdir /dev/cgroups/devices/g2 # launch a task, bind it to a cpu and move it to g1 CPU=2 while :; do : ; done & P=$! taskset -pc $CPU $P echo $P > /dev/cgroups/devices/g1/tasks # monitor g2 (it runs no tasks) and observe output perf stat -e cycles -I 1000 -C $CPU -G g2 # time counts unit events 1.000091408 7,579,527 cycles g2 2.000350111 cycles g2 3.000589181 cycles g2 4.000771428 cycles g2 # note the first line, which shows that a task ran in g2, despite # g2 having no tasks. This is because cpuctx->cgrp was wrongly # set when context of new event was installed. # After applying the fix we obtain the right output: perf stat -e cycles -I 1000 -C $CPU -G g2 # time counts unit events 1.000119615 cycles g2 2.000389430 cycles g2 3.000590962 cycles g2 Signed-off-by: David Carrillo-Cisneros Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Nilay Vaish Cc: Paul Turner Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vegard Nossum Link: http://lkml.kernel.org/r/1478026378-86083-1-git-send-email-davidcc@google.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e292132efac..ff230bb4a02e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event, * this will always be called from the right CPU. */ cpuctx = __get_cpu_context(ctx); + + /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */ + if (perf_cgroup_from_task(current, ctx) != event->cgrp) { + /* + * We are removing the last cpu event in this context. + * If that event is not active in this cpu, cpuctx->cgrp + * should've been cleared by perf_cgroup_switch. + */ + WARN_ON_ONCE(!add && cpuctx->cgrp); + return; + } cpuctx->cgrp = add ? event->cgrp : NULL; } From 9164b4ceb7b492a77c7fe770a4b9d1375c9cd45a Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Tue, 15 Nov 2016 13:01:57 +0100 Subject: [PATCH 06/39] x86/sysfb: Add support for 64bit EFI lfb_base The screen_info object was extended to support 64-bit lfb_base addresses in: ae2ee627dc87 ("efifb: Add support for 64-bit frame buffer addresses") However, the x86 simple-framebuffer setup code never made use of it. Fix it to properly assemble and verify the lfb_base before advertising simple-framebuffer devices. In particular, this means if VIDEO_CAPABILITY_64BIT_BASE is set, the screen_info->ext_lfb_base field will contain the upper 32 bits of the actual lfb_base.
Make sure the address is not 0 (i.e., unset), as well as does not overflow the physical address type. Signed-off-by: David Herrmann Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Tom Gundersen Link: http://lkml.kernel.org/r/20161115120158.15388-2-dh.herrmann@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/sysfb_simplefb.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 764a29f84de7..35b86415871f 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -67,6 +67,20 @@ __init int create_simplefb(const struct screen_info *si, struct platform_device *pd; struct resource res; unsigned long len; + u64 base; + + /* + * If the 64BIT_BASE capability is set, ext_lfb_base will contain the + * upper half of the base address. Assemble the address, then make sure + * it is valid and we can actually access it. + */ + base = si->lfb_base; + if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) + base |= (u64)si->ext_lfb_base << 32; + if (!base || (u64)(resource_size_t)base != base) { + printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); + return -EINVAL; + } /* don't use lfb_size as it may contain the whole VMEM instead of only * the part that is occupied by the framebuffer */ @@ -81,8 +95,8 @@ __init int create_simplefb(const struct screen_info *si, memset(&res, 0, sizeof(res)); res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; res.name = simplefb_resname; - res.start = si->lfb_base; - res.end = si->lfb_base + len - 1; + res.start = base; + res.end = res.start + len - 1; if (res.end <= res.start) return -EINVAL; From f96acec8c8020807429d21324547f4b904c37177 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Tue, 15 Nov 2016 13:01:58 +0100 Subject: [PATCH 07/39] x86/sysfb: Fix lfb_size calculation The screen_info.lfb_size field is shifted by 16 bits *only* in case of VBE. This has historical reasons since VBE advertised it similarly. However, in case of EFI framebuffers, the size is no longer shifted. Fix the x86 simple-framebuffer setup code to use the correct size in the non-VBE case. While at it, avoid variable abbreviations and rename 'len' to 'length', and use the correct types matching the screen_info definition. 
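[ Editorial illustration with an assumed lfb_size value: under VBE the firmware reports lfb_size in 64 KiB units, so it must be shifted up by 16 bits, while EFI reports a plain byte count: ]

	u64 size = si->lfb_size;			/* e.g. 0x100 under VBE */
	if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
		size <<= 16;				/* 0x100 << 16 = 16 MiB */
	/* EFI framebuffers: lfb_size is already in bytes, no shift */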
Signed-off-by: David Herrmann Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Tom Gundersen Link: http://lkml.kernel.org/r/20161115120158.15388-3-dh.herrmann@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/sysfb_simplefb.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 35b86415871f..85195d447a92 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -66,8 +66,8 @@ __init int create_simplefb(const struct screen_info *si, { struct platform_device *pd; struct resource res; - unsigned long len; - u64 base; + u64 base, size; + u32 length; /* * If the 64BIT_BASE capability is set, ext_lfb_base will contain the @@ -82,11 +82,20 @@ __init int create_simplefb(const struct screen_info *si, return -EINVAL; } - /* don't use lfb_size as it may contain the whole VMEM instead of only - * the part that is occupied by the framebuffer */ - len = mode->height * mode->stride; - len = PAGE_ALIGN(len); - if (len > (u64)si->lfb_size << 16) { + /* + * Don't use lfb_size as IORESOURCE size, since it may contain the + * entire VMEM, and thus require huge mappings. Use just the part we + * need, that is, the part where the framebuffer is located. But verify + * that it does not exceed the advertised VMEM. + * Note that in case of VBE, the lfb_size is shifted by 16 bits for + * historical reasons. + */ + size = si->lfb_size; + if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) + size <<= 16; + length = mode->height * mode->stride; + length = PAGE_ALIGN(length); + if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); return -EINVAL; } @@ -96,7 +105,7 @@ __init int create_simplefb(const struct screen_info *si, res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; res.name = simplefb_resname; res.start = base; - res.end = res.start + len - 1; + res.end = res.start + length - 1; if (res.end <= res.start) return -EINVAL; From c499336cea8bbe15554c6fcea2138658c5395bfe Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 15 Nov 2016 13:40:10 -0500 Subject: [PATCH 08/39] perf/x86/uncore: Fix crash by removing bogus event_list[] handling for SNB client uncore IMC Vince Weaver reported the following bug when KASAN is enabled: [ 205.748005] BUG: KASAN: slab-out-of-bounds in snb_uncore_imc_event_del+0x6c/0xa0 at addr ffff8800caa43768 [ 205.758324] Read of size 8 by task perf_fuzzer/6618 It's caused by accessing box->event_list. For client IMC, there are no generic counters. It defines its own fixed free running counters. So event_list and n_events are unused. They can be removed safely, which fixes the bug. ( There's still the separate question of how uninitialized state snuck into this data structure - but that's a separate fix. 
) Reported-by: Vince Weaver Tested-by: Vince Weaver Signed-off-by: Kan Liang Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Stephane Eranian Cc: Vince Weaver Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Alexander Shishkin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: davej@codemonkey.org.uk Cc: dvyukov@google.com Cc: eranian@gmail.com Link: http://lkml.kernel.org/r/1479235210-29090-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snb.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 81195cca7eae..a3dcc12bef4a 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags) snb_uncore_imc_event_start(event, 0); - box->n_events++; - return 0; } static void snb_uncore_imc_event_del(struct perf_event *event, int flags) { - struct intel_uncore_box *box = uncore_event_to_box(event); - int i; - snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); - - for (i = 0; i < box->n_events; i++) { - if (event == box->event_list[i]) { - --box->n_events; - break; - } - } } int snb_pci2phy_map_init(int devid) From 3ca0b51decf780ce6277b088a9f28cd6fb71e372 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 16 Nov 2016 11:02:00 -0800 Subject: [PATCH 09/39] clk: berlin: Pass correct type to hw provider registration Dan Carpenter reports that we're passing a pointer to a pointer here when we should just be passing a pointer. Pass the right pointer so that the of_clk_hw_onecell_get() sees the appropriate data pointer on its end. Reported-by: Dan Carpenter Cc: Jisheng Zhang Cc: Alexandre Belloni Cc: Sebastian Hesselbarth Cc: Stephen Boyd Fixes: f6475e298297 ("clk: berlin: Migrate to clk_hw based registration and OF APIs") Signed-off-by: Stephen Boyd --- drivers/clk/berlin/bg2.c | 2 +- drivers/clk/berlin/bg2q.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index edf3b96b3b73..1d99292e2039 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c @@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np) } /* register clk-provider */ - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); return; diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 0718e831475f..3b784b593afd 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np) } /* register clk-provider */ - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); return; From bdfdabfedc30c9574dde6198a1739d2be03bf934 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 16 Nov 2016 11:02:00 -0800 Subject: [PATCH 10/39] clk: efm32gg: Pass correct type to hw provider registration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dan Carpenter reports that we're passing a pointer to a pointer here when we should just be passing a pointer. Pass the right pointer so that the of_clk_hw_onecell_get() sees the appropriate data pointer on its end. 
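[ Editorial sketch of the pattern behind this fix and the berlin one above; the driver setup is elided: ]

	struct clk_hw_onecell_data *clk_data;	/* allocated and filled by the driver */

	/*
	 * of_clk_hw_onecell_get() treats its data argument as a
	 * struct clk_hw_onecell_data *, so the provider must be
	 * registered with the pointer itself:
	 */
	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);

	/*
	 * Registering with &clk_data (a pointer to the pointer) made the
	 * getter dereference the wrong object -- the bug being fixed here.
	 */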
Reported-by: Dan Carpenter Cc: Stephen Boyd Cc: Uwe Kleine-König Fixes: 9337631f52a8 ("clk: efm32gg: Migrate to clk_hw based OF and registration APIs") Signed-off-by: Stephen Boyd --- drivers/clk/clk-efm32gg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index 8802a2dd56ac..f674778fb3ac 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c @@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np) hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); } CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); From 553bbc11aa6c1f9e0f529a06aeeca15fbe4a3985 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 16 Nov 2016 15:17:09 +0100 Subject: [PATCH 11/39] x86/boot: Avoid warning for zero-filling .bss The latest binutils are warning about a .fill directive with an explicit value in a .bss section: arch/x86/kernel/head_32.S: Assembler messages: arch/x86/kernel/head_32.S:677: Warning: ignoring fill value in section `.bss..page_aligned' arch/x86/kernel/head_32.S:679: Warning: ignoring fill value in section `.bss..page_aligned' This comes from the 'ENTRY()' macro padding the space between the symbols with 'nop' via: .align 4,0x90 Open-coding the .globl directive without the padding avoids that warning, as all the symbols are already page aligned. Signed-off-by: Arnd Bergmann Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20161116141726.2013389-1-arnd@arndb.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_32.S | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index b6b2f0264af3..2dabea46f039 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS initial_pg_pmd: .fill 1024*KPMDS,4,0 #else -ENTRY(initial_page_table) +.globl initial_page_table +initial_page_table: .fill 1024,4,0 #endif initial_pg_fixmap: .fill 1024,4,0 -ENTRY(empty_zero_page) +.globl empty_zero_page +empty_zero_page: .fill 4096,1,0 -ENTRY(swapper_pg_dir) +.globl swapper_pg_dir +swapper_pg_dir: .fill 1024,4,0 EXPORT_SYMBOL(empty_zero_page) From c2d75e03d6307bda0e14b616818a6f7b09fd623a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 17 Nov 2016 09:57:23 -0600 Subject: [PATCH 12/39] x86/unwind: Prevent KASAN false positive warnings in guess unwinder The guess unwinder scans the entire stack, which can cause KASAN "stack-out-of-bounds" false positive warnings. Tell KASAN to ignore it. Reported-by: Peter Zijlstra Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: davej@codemonkey.org.uk Cc: dvyukov@google.com Link: http://lkml.kernel.org/r/61939c0b2b2d63ce97ba59cba3b00fd47c2962cf.1479398226.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/unwind_guess.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c index 2d721e533cf4..b80e8bf43cc6 100644 --- a/arch/x86/kernel/unwind_guess.c +++ b/arch/x86/kernel/unwind_guess.c @@ -7,11 +7,13 @@ unsigned long unwind_get_return_address(struct unwind_state *state) { + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); + if (unwind_done(state)) return 0; return ftrace_graph_ret_addr(state->task, &state->graph_idx, - *state->sp, state->sp); + addr, state->sp); } EXPORT_SYMBOL_GPL(unwind_get_return_address); @@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state) return false; do { + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); + for (state->sp++; state->sp < info->end; state->sp++) - if (__kernel_text_address(*state->sp)) + if (__kernel_text_address(addr)) return true; state->sp = info->next_sp; From 91e08ab0c8515450258d7ad9033bfe69bebad25a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 17 Nov 2016 09:57:24 -0600 Subject: [PATCH 13/39] x86/dumpstack: Prevent KASAN false positive warnings The oops stack dump code scans the entire stack, which can cause KASAN "stack-out-of-bounds" false positive warnings. Tell KASAN to ignore it. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: davej@codemonkey.org.uk Cc: dvyukov@google.com Link: http://lkml.kernel.org/r/5f6e80c4b0c7f7f0b6211900847a247cdaad753c.1479398226.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/dumpstack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9b7cf5c28f5f..85f854b98a9d 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, for (; stack < stack_info.end; stack++) { unsigned long real_addr; int reliable = 0; - unsigned long addr = *stack; + unsigned long addr = READ_ONCE_NOCHECK(*stack); unsigned long *ret_addr_p = unwind_get_return_address_ptr(&state); From e40ed1542dd779e5037a22c6b534e57127472365 Mon Sep 17 00:00:00 2001 From: Janakarajan Natarajan Date: Thu, 17 Nov 2016 10:15:06 -0600 Subject: [PATCH 14/39] perf/x86: Add perf support for AMD family-17h processors This patch enables perf core PMU support for the new AMD family-17h processors. In family-17h, there is no PMC-event constraint. All events, irrespective of the type, can be measured using any of the six generic performance counters. 
Signed-off-by: Janakarajan Natarajan Acked-by: Borislav Petkov Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/1479399306-13375-1-git-send-email-Janakarajan.Natarajan@amd.com Signed-off-by: Ingo Molnar --- arch/x86/events/amd/core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index f5f4b3fbbbc2..afb222b63cae 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void) pr_cont("Fam15h "); x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; break; - + case 0x17: + pr_cont("Fam17h "); + /* + * In family 17h, there are no event constraints in the PMC hardware. + * We fallback to using default amd_get_event_constraints. + */ + break; default: pr_err("core perfctr but no constraints; unknown hardware!\n"); return -ENODEV; From 23ea44c2150d14b97518435a65cc74111804fbeb Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 10 Nov 2016 16:06:28 -0500 Subject: [PATCH 15/39] NFSv4.1: Fix a regression in DELEGRETURN We don't want to call nfs4_free_revoked_stateid() in the case where the delegreturn was successful. Reported-by: Benjamin Coddington Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 45b38ee4813c..8e25327077e2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5569,6 +5569,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) switch (task->tk_status) { case 0: renew_lease(data->res.server, data->timestamp); + break; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_EXPIRED: @@ -5579,8 +5580,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: task->tk_status = 0; - if (data->roc) - pnfs_roc_set_barrier(data->inode, data->roc_barrier); break; default: if (nfs4_async_handle_error(task, data->res.server, @@ -5590,6 +5589,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) } } data->rpc_status = task->tk_status; + if (data->roc && data->rpc_status == 0) + pnfs_roc_set_barrier(data->inode, data->roc_barrier); } static void nfs4_delegreturn_release(void *calldata) From 3e7dfb1659c2888fc0152ec2b02a5e932397bb0a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Nov 2016 11:19:55 -0500 Subject: [PATCH 16/39] NFSv4: Fix CLOSE races with OPEN If the reply to a successful CLOSE call races with an OPEN to the same file, we can end up scribbling over the stateid that represents the new open state. The race looks like: Client Server ====== ====== CLOSE stateid A on file "foo" CLOSE stateid A, return stateid C OPEN file "foo" OPEN "foo", return stateid B Receive reply to OPEN Reset open state for "foo" Associate stateid B to "foo" Receive CLOSE for A Reset open state for "foo" Replace stateid B with C The fix is to examine the argument of the CLOSE, and check for a match with the current stateid "other" field. If the two do not match, then the above race occurred, and we should just ignore the CLOSE. 
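[ Editorial sketch of the comparison the fix relies on: an NFSv4 stateid carries a seqid, which orders updates to one piece of state, plus a 12-byte 'other' field identifying the state itself. Only the 'other' field says whether two stateids refer to the same open: ]

	/* Illustrative only; the kernel's nfs4_stateid_match_other() is the
	 * real helper used by the fix below. */
	static bool same_open(const nfs4_stateid *a, const nfs4_stateid *b)
	{
		return memcmp(a->other, b->other, NFS4_STATEID_OTHER_SIZE) == 0;
	}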
Reported-by: Benjamin Coddington Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/nfs4_fs.h | 7 +++++++ fs/nfs/nfs4proc.c | 12 ++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 9b3a82abab07..1452177c822d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state) return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; } +static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state, + const nfs4_stateid *stateid) +{ + return test_bit(NFS_OPEN_STATE, &state->flags) && + nfs4_stateid_match_other(&state->open_stateid, stateid); +} + #else #define nfs4_close_state(a, b) do { } while (0) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8e25327077e2..0b3cdf856333 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) } static void nfs_clear_open_stateid_locked(struct nfs4_state *state, - nfs4_stateid *arg_stateid, nfs4_stateid *stateid, fmode_t fmode) { clear_bit(NFS_O_RDWR_STATE, &state->flags); @@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, } if (stateid == NULL) return; - /* Handle races with OPEN */ - if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || - (nfs4_stateid_match_other(stateid, &state->open_stateid) && - !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { + /* Handle OPEN+OPEN_DOWNGRADE races */ + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { nfs_resync_open_stateid_locked(state); return; } @@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); - nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); + /* Ignore, if the CLOSE argument doesn't match the current stateid */ + if (nfs4_state_match_open_stateid_other(state, arg_stateid)) + nfs_clear_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) nfs4_schedule_state_manager(state->owner->so_server->nfs_client); From 5cc7861eb5b425c7a30ff7676a4b9d0ca62d5c76 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Nov 2016 11:19:56 -0500 Subject: [PATCH 17/39] NFSv4: Don't call close if the open stateid has already been cleared Ensure we test whether the open stateid is actually set before we send a CLOSE.
Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0b3cdf856333..2d1481eb1929 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3122,7 +3122,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) } else if (is_rdwr) calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; - if (!nfs4_valid_open_stateid(state)) + if (!nfs4_valid_open_stateid(state) || + test_bit(NFS_OPEN_STATE, &state->flags) == 0) call_close = 0; spin_unlock(&state->owner->so_lock); From d41cbfc9a64d11835a5b5b90caa7d6f3a88eb1df Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Mon, 14 Nov 2016 11:51:37 -0500 Subject: [PATCH 18/39] NFSv4.1: Handle NFS4ERR_OLD_STATEID in nfs4_reclaim_open_state Now that we're doing TEST_STATEID in nfs4_reclaim_open_state(), we can have a NFS4ERR_OLD_STATEID returned from nfs41_open_expired() . Instead of marking state recovery as failed, mark the state for recovery again. Signed-off-by: Benjamin Coddington Signed-off-by: Anna Schumaker --- fs/nfs/nfs4state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5f4281ec5f72..0959c9661662 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1547,6 +1547,7 @@ restart: ssleep(1); case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: From fc0e81b2bea0ebceb71889b61d2240856141c9ee Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 19 Nov 2016 18:42:40 -0800 Subject: [PATCH 19/39] x86/traps: Ignore high word of regs->cs in early_fixup_exception() On the 80486 DX, it seems that some exceptions may leave garbage in the high bits of CS. This causes sporadic failures in which early_fixup_exception() refuses to fix up an exception. As far as I can tell, this has been buggy for a long time, but the problem seems to have been exacerbated by commits: 1e02ce4cccdc ("x86: Store a per-cpu shadow copy of CR4") e1bfc11c5a6f ("x86/init: Fix cr4_init_shadow() on CR4-less machines") This appears to have broken for as long as we've had early exception handling. [ Note to stable maintainers: This patch is needed all the way back to 3.4, but it will only apply to 4.6 and up, as it depends on commit: 0e861fbb5bda ("x86/head: Move early exception panic code into early_fixup_exception()") If you want to backport to kernels before 4.6, please don't backport the prerequisites (there was a big chain of them that rewrote a lot of the early exception machinery); instead, ask me and I can send you a one-liner that will apply. ] Reported-by: Matthew Whitehead Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Fixes: 4c5023a3fa2e ("x86-32: Handle exception table entries during early boot") Link: http://lkml.kernel.org/r/cb32c69920e58a1a58e7b5cad975038a69c0ce7d.1479609510.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/mm/extable.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 79ae939970d3..fcd06f7526de 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) if (early_recursion_flag > 2) goto halt_loop; - if (regs->cs != __KERNEL_CS) + /* + * Old CPUs leave the high bits of CS on the stack + * undefined. I'm not sure which CPUs do this, but at least + * the 486 DX works this way. + */ + if ((regs->cs & 0xFFFF) != __KERNEL_CS) goto fail; /* From ed68d7e9b9cfb64f3045ffbcb108df03c09a0f98 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 19 Nov 2016 15:37:30 -0800 Subject: [PATCH 20/39] x86/boot: Fail the boot if !M486 and CPUID is missing Linux will have all kinds of sporadic problems on systems that don't have the CPUID instruction unless CONFIG_M486=y. In particular, sync_core() will explode. I believe that these kernels had a better chance of working before commit 05fb3c199bb0 ("x86/boot: Initialize FPU and X86_FEATURE_ALWAYS even if we don't have CPUID"). That commit inadvertently fixed a serious bug: we used to fail to detect the FPU if CPUID wasn't present. Because we also used to forget to set X86_FEATURE_ALWAYS, we end up with no cpu feature bits set at all. This meant that alternative patching didn't do anything and, if paravirt was disabled, we could plausibly finish the entire boot process without calling sync_core(). Rather than trying to work around these issues, just have the kernel fail loudly if it's running on a CPUID-less 486, doesn't have CPUID, and doesn't have CONFIG_M486 set. Reported-by: Matthew Whitehead Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/70eac6639f23df8be5fe03fa1984aedd5d40077a.1479598603.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/boot/cpu.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c index 26240dde081e..4224ede43b4e 100644 --- a/arch/x86/boot/cpu.c +++ b/arch/x86/boot/cpu.c @@ -87,6 +87,12 @@ int validate_cpu(void) return -1; } + if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && + !has_eflag(X86_EFLAGS_ID)) { + printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n"); + return -1; + } + if (err_flags) { puts("This kernel requires the following features " "not present on the CPU:\n"); From b22cbe404a9cc3c7949e380fa1861e31934c8978 Mon Sep 17 00:00:00 2001 From: Yu-cheng Yu Date: Thu, 17 Nov 2016 09:11:35 -0800 Subject: [PATCH 21/39] x86/fpu: Fix invalid FPU ptrace state after execve() Robert O'Callahan reported that after an execve PTRACE_GETREGSET NT_X86_XSTATE continues to return the pre-exec register values until the exec'ed task modifies FPU state. The test code is at: https://bugzilla.redhat.com/attachment.cgi?id=1164286. What is happening is fpu__clear() does not properly clear fpstate. Fix it by doing just that. 
Reported-by: Robert O'Callahan Signed-off-by: Yu-cheng Yu Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: David Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ravi V. Shankar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1479402695-6553-1-git-send-email-yu-cheng.yu@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/fpu/core.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 47004010ad5d..ebb4e95fbd74 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu) { WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ - if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { - /* FPU state will be reallocated lazily at the first use. */ - fpu__drop(fpu); - } else { - if (!fpu->fpstate_active) { - fpu__activate_curr(fpu); - user_fpu_begin(); - } + fpu__drop(fpu); + + /* + * Make sure fpstate is cleared and initialized. + */ + if (static_cpu_has(X86_FEATURE_FPU)) { + fpu__activate_curr(fpu); + user_fpu_begin(); copy_init_fpstate_to_fpregs(); } } From 8c5c86fb6abec7d76ec4d51a46714161bceab315 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 18 Nov 2016 18:52:24 +0200 Subject: [PATCH 22/39] x86/platform/intel-mid: Register watchdog device after SCU The watchdog device in Intel Tangier relies on the SCU to be present. It uses the SCU IPC channel to send commands and receive responses. If the watchdog driver is initialized before the SCU and a command has been sent, the result is always an error like the following: intel_mid_wdt: Error stopping watchdog: 0xffffffed Register the watchdog device when the SCU is ready to avoid the described issue. Signed-off-by: Andy Shevchenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20161118165224.175514-1-andriy.shevchenko@linux.intel.com [ Small cleanups.
] Signed-off-by: Ingo Molnar --- .../intel-mid/device_libs/platform_wdt.c | 32 ++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c index de734134bc8d..4f96cd009962 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c @@ -14,7 +14,9 @@ #include #include #include + #include +#include #include #define TANGIER_EXT_TIMER0_MSI 15 @@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = { .probe = tangier_probe, }; -static int __init register_mid_wdt(void) +static int wdt_scu_status_change(struct notifier_block *nb, + unsigned long code, void *data) { - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { - wdt_dev.dev.platform_data = &tangier_pdata; - return platform_device_register(&wdt_dev); + if (code == SCU_DOWN) { + platform_device_unregister(&wdt_dev); + return 0; } - return -ENODEV; + return platform_device_register(&wdt_dev); } +static struct notifier_block wdt_scu_notifier = { + .notifier_call = wdt_scu_status_change, +}; + +static int __init register_mid_wdt(void) +{ + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) + return -ENODEV; + + wdt_dev.dev.platform_data = &tangier_pdata; + + /* + * We need to be sure that the SCU IPC is ready before watchdog device + * can be registered: + */ + intel_scu_notifier_add(&wdt_scu_notifier); + + return 0; +} rootfs_initcall(register_mid_wdt); From a980ce352fcd408d30b044455e5f6e959d6258b6 Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Fri, 18 Nov 2016 13:07:19 -0800 Subject: [PATCH 23/39] x86/build: Build compressed x86 kernels as PIE when !CONFIG_RELOCATABLE as well Since the bootloader may load the compressed x86 kernel at any address, it should always be built as PIE, not just when CONFIG_RELOCATABLE=y. Otherwise, linker in binutils 2.27 will optimize GOT load into the absolute address when building the compressed x86 kernel as a non-PIE executable. Signed-off-by: H.J. Lu Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org [ Small wording changes. ] Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 536ccfcc01c6..34d9e15857c3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -40,8 +40,8 @@ GCOV_PROFILE := n UBSAN_SANITIZE :=n LDFLAGS := -m elf_$(UTS_MACHINE) -ifeq ($(CONFIG_RELOCATABLE),y) -# If kernel is relocatable, build compressed kernel as PIE. +# Compressed kernel should be built as PIE since it may be loaded at any +# address by the bootloader. 
ifeq ($(CONFIG_X86_32),y) LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) else @@ -51,7 +51,6 @@ else LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ && echo "-z noreloc-overflow -pie --no-dynamic-linker") endif -endif LDFLAGS_vmlinux := -T hostprogs-y := mkpiggy From e5dce2868818ca8706924f7bdc7939d481eefab0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 18 Nov 2016 19:27:23 +0200 Subject: [PATCH 24/39] x86/platform/intel-mid: Rename platform_wdt to platform_mrfld_wdt Rename the watchdog platform library file to explicitly show that is used only on Intel Merrifield platforms. Signed-off-by: Andy Shevchenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20161118172723.179761-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/platform/intel-mid/device_libs/Makefile | 2 +- .../device_libs/{platform_wdt.c => platform_mrfld_wdt.c} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename arch/x86/platform/intel-mid/device_libs/{platform_wdt.c => platform_mrfld_wdt.c} (97%) diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index 429d08be7848..dd6cfa4ad3ac 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o # MISC Devices obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o -obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o +obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c similarity index 97% rename from arch/x86/platform/intel-mid/device_libs/platform_wdt.c rename to arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 4f96cd009962..3f1f1c77d090 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -1,5 +1,5 @@ /* - * platform_wdt.c: Watchdog platform library file + * Intel Merrifield watchdog platform device library file * * (C) Copyright 2014 Intel Corporation * Author: David Cohen From e96271f3ed7e702fa36dd0605c0c5b5f065af816 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Fri, 18 Nov 2016 13:38:43 +0200 Subject: [PATCH 25/39] perf/core: Fix address filter parser The token table passed into match_token() must be null-terminated, which it currently is not in the perf's address filter string parser, as caught by Vince's perf_fuzzer and KASAN. It doesn't blow up otherwise because of the alignment padding of the table to the next element in the .rodata, which is luck. Fixing by adding a null-terminator to the token table. 
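[ Editorial usage sketch of match_token() from include/linux/parser.h: it walks the table until a pattern matches or it reaches an entry whose pattern is NULL, which is why the terminating entry is mandatory. The "filter"/"start"/"stop" patterns are assumed for illustration; IF_ACT_NONE is the terminator the patch below adds: ]

	substring_t args[MAX_OPT_ARGS];
	static const match_table_t tokens = {
		{ IF_ACT_FILTER, "filter" },
		{ IF_ACT_START,  "start"  },
		{ IF_ACT_STOP,   "stop"   },
		{ IF_ACT_NONE,   NULL     },	/* terminator: ends the table walk */
	};

	int token = match_token(fstr, tokens, args);	/* IF_ACT_NONE if no match */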
Reported-by: Vince Weaver Tested-by: Vince Weaver Signed-off-by: Alexander Shishkin Acked-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Thomas Gleixner Cc: dvyukov@google.com Cc: stable@vger.kernel.org # v4.7+ Fixes: 375637bc524 ("perf/core: Introduce address range filtering") Link: http://lkml.kernel.org/r/877f81f264.fsf@ashishki-desk.ger.corp.intel.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index ff230bb4a02e..6ee1febdf6ff 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8029,6 +8029,7 @@ restart: * if is not specified, the range is treated as a single address. */ enum { + IF_ACT_NONE = -1, IF_ACT_FILTER, IF_ACT_START, IF_ACT_STOP, @@ -8052,6 +8053,7 @@ static const match_table_t if_tokens = { { IF_SRC_KERNEL, "%u/%u" }, { IF_SRC_FILEADDR, "%u@%s" }, { IF_SRC_KERNELADDR, "%u" }, + { IF_ACT_NONE, NULL }, }; /* From ec638db8cb9ddd5ca08b23f2835b6c9c15eb616d Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Mon, 14 Nov 2016 11:08:45 -0800 Subject: [PATCH 26/39] thermal/powerclamp: add back module device table Commit 3105f234e0aba43e44e277c20f9b32ee8add43d4 replaced module cpu id table with a cpu feature check, which is logically correct. But we need the module device table to allow module auto loading. Cc: stable@vger.kernel.org # 4.8 Fixes: 3105f234e0ab ("thermal/powerclamp: correct cpu support check") Signed-off-by: Jacob Pan Signed-off-by: Zhang Rui --- drivers/thermal/intel_powerclamp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 7a223074df3d..afada655f861 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { .set_cur_state = powerclamp_set_cur_state, }; +static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); + static int __init powerclamp_probe(void) { - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + + if (!x86_match_cpu(intel_powerclamp_ids)) { pr_err("CPU does not support MWAIT"); return -ENODEV; } From d75a6a0e3933acbba44e4ad8d8f3c4d4f76b6e03 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Fri, 18 Nov 2016 21:11:39 -0500 Subject: [PATCH 27/39] NFSv4.1: Keep a reference on lock states while checking While walking the list of lock_states, keep a reference on each nfs4_lock_state to be checked, otherwise the lock state could be removed while the check performs TEST_STATEID and possible FREE_STATEID.
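[ Editorial sketch of the walk-with-reference pattern the fix applies; the actual RPC calls are elided, see the diff below: ]

	spin_lock(&state->state_lock);
	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
		atomic_inc(&lsp->ls_count);		/* pin the current entry */
		spin_unlock(&state->state_lock);	/* now safe to sleep */

		nfs4_put_lock_state(prev);		/* drop the previous pin */
		prev = lsp;

		/* ... TEST_STATEID / FREE_STATEID with the lock dropped ... */

		spin_lock(&state->state_lock);		/* re-take before advancing */
	}
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(prev);			/* drop the final pin */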
Signed-off-by: Benjamin Coddington Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2d1481eb1929..e6dc95e0f97e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) static int nfs41_check_expired_locks(struct nfs4_state *state) { int status, ret = NFS_OK; - struct nfs4_lock_state *lsp; + struct nfs4_lock_state *lsp, *prev = NULL; struct nfs_server *server = NFS_SERVER(state->inode); if (!test_bit(LK_STATE_IN_USE, &state->flags)) goto out; + + spin_lock(&state->state_lock); list_for_each_entry(lsp, &state->lock_states, ls_locks) { if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { struct rpc_cred *cred = lsp->ls_state->owner->so_cred; + atomic_inc(&lsp->ls_count); + spin_unlock(&state->state_lock); + + nfs4_put_lock_state(prev); + prev = lsp; + status = nfs41_test_and_free_expired_stateid(server, &lsp->ls_stateid, cred); @@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state) set_bit(NFS_LOCK_LOST, &lsp->ls_flags); } else if (status != NFS_OK) { ret = status; - break; + nfs4_put_lock_state(prev); + goto out; } + spin_lock(&state->state_lock); } - }; + } + spin_unlock(&state->state_lock); + nfs4_put_lock_state(prev); out: return ret; } From 8acf7a106326eb94e143552de81f34308149121c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 21 Nov 2016 15:34:00 +0800 Subject: [PATCH 28/39] crypto: algif_hash - Fix result clobbering in recvmsg Recently an init call was added to hash_recvmsg so as to reset the hash state in case a sendmsg call was never made. Unfortunately this ended up clobbering the result if the previous sendmsg was done with a MSG_MORE flag. This patch fixes it by excluding that case when we make the init call. Fixes: a8348bca2944 ("algif_hash - Fix NULL hash crash with shash") Reported-by: Patrick Steinhardt Signed-off-by: Herbert Xu --- crypto/algif_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 05e21b464433..d19b09cdf284 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -214,7 +214,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - if (!result) { + if (!result && !ctx->more) { err = af_alg_wait_for_completion( crypto_ahash_init(&ctx->req), &ctx->completion); From c8467f7a3620698bf3c22f0e199b550fb611a8ae Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 21 Nov 2016 16:26:19 +0800 Subject: [PATCH 29/39] crypto: scatterwalk - Remove unnecessary aliasing check in map_and_copy The aliasing check in map_and_copy is no longer necessary because the IPsec ESP code no longer provides an IV that points into the actual request data. As this check is now triggering BUG checks due to the vmalloced stack code, I'm removing it. 
Reported-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 52ce17a3dd63..c16c94f88733 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, sg = scatterwalk_ffwd(tmp, sg, start); - if (sg_page(sg) == virt_to_page(buf) && - sg->offset == offset_in_page(buf)) - return; - scatterwalk_start(&walk, sg); scatterwalk_copychunks(buf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); From 18f649ef344127ef6de23a5a4272dbe2fdb73dde Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 14 Nov 2016 19:46:09 +0100 Subject: [PATCH 30/39] sched/autogroup: Fix autogroup_move_group() to never skip sched_move_task() The PF_EXITING check in task_wants_autogroup() is no longer needed. Remove it, but see the next patch. However the comment is correct in that autogroup_move_group() must always change task_group() for every thread so the sysctl_ check is very wrong; we can race with cgroups and even sys_setsid() is not safe because a task running with task_group() == ag->tg must participate in refcounting: int main(void) { int sctl = open("/proc/sys/kernel/sched_autogroup_enabled", O_WRONLY); assert(sctl > 0); if (fork()) { wait(NULL); // destroy the child's ag/tg pause(); } assert(pwrite(sctl, "1\n", 2, 0) == 2); assert(setsid() > 0); if (fork()) pause(); kill(getppid(), SIGKILL); sleep(1); // The child has gone, the grandchild runs with kref == 1 assert(pwrite(sctl, "0\n", 2, 0) == 2); assert(setsid() > 0); // runs with the freed ag/tg for (;;) sleep(1); return 0; } crashes the kernel. It doesn't really need sleep(1), it doesn't matter if autogroup_move_group() actually frees the task_group or this happens later. Reported-by: Vern Lovejoy Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: hartsjc@redhat.com Cc: vbendel@redhat.com Link: http://lkml.kernel.org/r/20161114184609.GA15965@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/auto_group.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index a5d966cb8891..ad2b19ad6ca0 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) { if (tg != &root_task_group) return false; - /* - * We can only assume the task group can't go away on us if - * autogroup_move_group() can see us on ->thread_group list. + * If we race with autogroup_move_group() the caller can use the old + * value of signal->autogroup but in this case sched_move_task() will + * be called again before autogroup_kref_put(). */ - if (p->flags & PF_EXITING) - return false; - return true; } @@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) } p->signal->autogroup = autogroup_kref_get(ag); - - if (!READ_ONCE(sysctl_sched_autogroup_enabled)) - goto out; - + /* + * We can't avoid sched_move_task() after we changed signal->autogroup, + * this process can already run with task_group() == prev->tg or we can + * race with cgroup code which can read autogroup = prev under rq->lock. 
+ * In the latter case for_each_thread() can not miss a migrating thread, + * cpu_cgroup_attach() must not be possible after cgroup_exit() and it + * can't be removed from thread list, we hold ->siglock. + */ for_each_thread(p, t) sched_move_task(t); -out: + unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } From 8e5bfa8c1f8471aa4a2d30be631ef2b50e10abaf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 14 Nov 2016 19:46:12 +0100 Subject: [PATCH 31/39] sched/autogroup: Do not use autogroup->tg in zombie threads Exactly because for_each_thread() in autogroup_move_group() can't see it and update its ->sched_task_group before _put() and possibly free(). So the exiting task needs another sched_move_task() before exit_notify() and we need to re-introduce the PF_EXITING (or similar) check removed by the previous change for another reason. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: hartsjc@redhat.com Cc: vbendel@redhat.com Cc: vlovejoy@redhat.com Link: http://lkml.kernel.org/r/20161114184612.GA15968@redhat.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 ++ kernel/exit.c | 1 + kernel/sched/auto_group.c | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 348f51b0ec92..e9c009dc3a4a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); #ifdef CONFIG_PROC_FS extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); @@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { } static inline void sched_autogroup_detach(struct task_struct *p) { } static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif extern int yield_to(struct task_struct *p, bool preempt); diff --git a/kernel/exit.c b/kernel/exit.c index 9d68c45ebbe3..3076f3089919 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -836,6 +836,7 @@ void __noreturn do_exit(long code) */ perf_event_exit_task(tsk); + sched_autogroup_exit_task(tsk); cgroup_exit(tsk); /* diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index ad2b19ad6ca0..f1c8fd566246 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -115,10 +115,26 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) * If we race with autogroup_move_group() the caller can use the old * value of signal->autogroup but in this case sched_move_task() will * be called again before autogroup_kref_put(). + * + * However, there is no way sched_autogroup_exit_task() could tell us + * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. */ + if (p->flags & PF_EXITING) + return false; + return true; } +void sched_autogroup_exit_task(struct task_struct *p) +{ + /* + * We are going to call exit_notify() and autogroup_move_group() can't + * see this thread after that: we can no longer use signal->autogroup. 
+	 * See the PF_EXITING check in task_wants_autogroup().
+	 */
+	sched_move_task(p);
+}
+
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -142,6 +158,9 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	 * In the latter case for_each_thread() can not miss a migrating thread,
 	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
 	 * can't be removed from thread list, we hold ->siglock.
+	 *
+	 * If an exiting thread was already removed from thread list we rely on
+	 * sched_autogroup_exit_task().
 	 */
 	for_each_thread(p, t)
 		sched_move_task(t);

From ae31fe51a3cceaa0cabdb3058f69669ecb47f12e Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Tue, 22 Nov 2016 10:57:42 +0100
Subject: [PATCH 32/39] perf/x86: Restore TASK_SIZE check on frame pointer

The following commit:

  75925e1ad7f5 ("perf/x86: Optimize stack walk user accesses")

... switched from copy_from_user_nmi() to __copy_from_user_nmi() with a
manual access_ok() check.

Unfortunately, copy_from_user_nmi() does an explicit check against
TASK_SIZE, whereas the access_ok() uses whatever the current address
limit of the task is.

We are getting NMIs when __probe_kernel_read() has switched to
KERNEL_DS, and we then see vmalloc faults when we access what looks
like pointers into vmalloc space:

  [] WARNING: CPU: 3 PID: 3685731 at arch/x86/mm/fault.c:435 vmalloc_fault+0x289/0x290
  [] CPU: 3 PID: 3685731 Comm: sh Tainted: G        W       4.6.0-5_fbk1_223_gdbf0f40 #1
  [] Call Trace:
  []  [] dump_stack+0x4d/0x6c
  []  [] __warn+0xd3/0xf0
  []  [] warn_slowpath_null+0x1d/0x20
  []  [] vmalloc_fault+0x289/0x290
  []  [] __do_page_fault+0x330/0x490
  []  [] do_page_fault+0xc/0x10
  []  [] page_fault+0x22/0x30
  []  [] ? perf_callchain_user+0x100/0x2a0
  []  [] get_perf_callchain+0x17f/0x190
  []  [] perf_callchain+0x67/0x80
  []  [] perf_prepare_sample+0x2a0/0x370
  []  [] perf_event_output+0x20/0x60
  []  [] ? perf_event_update_userpage+0xc7/0x130
  []  [] __perf_event_overflow+0x181/0x1d0
  []  [] perf_event_overflow+0x14/0x20
  []  [] intel_pmu_handle_irq+0x1d3/0x490
  []  [] ? copy_user_enhanced_fast_string+0x7/0x10
  []  [] ? vunmap_page_range+0x1a1/0x2f0
  []  [] ? unmap_kernel_range_noflush+0x11/0x20
  []  [] ? ghes_copy_tofrom_phys+0x116/0x1f0
  []  [] ? x2apic_send_IPI_self+0x1d/0x20
  []  [] perf_event_nmi_handler+0x2d/0x50
  []  [] nmi_handle+0x61/0x110
  []  [] default_do_nmi+0x44/0x110
  []  [] do_nmi+0xdb/0x150
  []  [] end_repeat_nmi+0x1a/0x1e
  []  [] ? copy_user_enhanced_fast_string+0x7/0x10
  []  [] ? copy_user_enhanced_fast_string+0x7/0x10
  []  [] ? copy_user_enhanced_fast_string+0x7/0x10
  [] <>  [] ? __probe_kernel_read+0x3e/0xa0

Fix this by moving the valid_user_frame() check to before the uaccess
that loads the return address and the pointer to the next frame.
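For illustration, the difference between the two checks can be sketched
in a few lines of C. This is a hand-written sketch of the idea only, not
the exact kernel implementation; the function is modeled on the x86
events code, and the bound against the fixed TASK_SIZE constant is the
point being made:

	/*
	 * Sketch: access_ok() compares against the task's *current*
	 * address limit, which __probe_kernel_read() may have raised to
	 * KERNEL_DS before the NMI arrived.  Checking against the fixed
	 * TASK_SIZE bound rejects kernel/vmalloc pointers regardless of
	 * which addr_limit the NMI interrupted.
	 */
	static inline int valid_user_frame(const void __user *fp,
					   unsigned long size)
	{
		unsigned long addr = (unsigned long)fp;

		/* reject wrap-around and ranges reaching above TASK_SIZE */
		return addr + size >= addr && addr + size <= TASK_SIZE;
	}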
Signed-off-by: Johannes Weiner
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Cc: linux-kernel@vger.kernel.org
Fixes: 75925e1ad7f5 ("perf/x86: Optimize stack walk user accesses")
Signed-off-by: Ingo Molnar
---
 arch/x86/events/core.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..9d4bf3ab049e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 		frame.next_frame     = 0;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, 8))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, cs_base + frame.return_address);
 		fp = compat_ptr(ss_base + frame.next_frame);
 	}
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, frame.return_address);
 		fp = (void __user *)frame.next_frame;
 	}

From b8000586c90b4804902058a38d3a59ce5708e695 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 17 Nov 2016 18:17:31 +0100
Subject: [PATCH 33/39] perf/x86/intel: Cure bogus unwind from PEBS entries

Vince Weaver reported that perf_fuzzer + KASAN detects that PEBS event
unwinds sometimes do 'weird' things. In particular, we seemed to be
ending up unwinding from random places on the NMI stack.

While it was somewhat expected that the event record BP,SP would not
match the interrupt BP,SP in that the interrupt is strictly later than
the record event, it was overlooked that it could be on an already
overwritten stack.

Therefore, don't copy the recorded BP,SP over the interrupted BP,SP
when we need stack unwinds.

Note that it's still possible the unwind doesn't fully match the actual
event, as it's entirely possible to have done an (I)RET between record
and interrupt, but on average it should still point in the general
direction of where the event came from. Also, it's the best we can do,
considering.

The particular scenario that triggered the bogus NMI stack unwind was a
PEBS event with a very short period: upon enabling the event at the
tail of the PMI handler (FREEZE_ON_PMI is not used), it instantly
triggers a record (while still on the NMI stack), which in turn
triggers the next PMI. This then causes back-to-back NMIs, and we'll
try to unwind the stack-frame from the last NMI, which obviously is now
overwritten by our own.
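To see why a stale BP is a problem here, consider what a frame-pointer
unwinder does with it. The sketch below is illustrative only: the struct
mirrors x86's saved (RBP, RIP) frame layout, while on_stack() and
store_ip() are hypothetical placeholders, not kernel functions. Every
iteration trusts that BP points at a valid frame, so a BP recorded on a
since-overwritten NMI stack walks through unrelated data:

	/* Illustrative frame-pointer walk, not the kernel unwinder. */
	struct stack_frame {
		struct stack_frame *next_frame;	/* saved RBP */
		unsigned long return_address;	/* saved RIP */
	};

	/* hypothetical helpers: bounds check and sample sink */
	extern int on_stack(const struct stack_frame *frame);
	extern void store_ip(unsigned long ip);

	static void walk_frames(unsigned long bp)
	{
		struct stack_frame *frame = (struct stack_frame *)bp;

		while (frame && on_stack(frame)) {
			store_ip(frame->return_address);
			/* reads garbage once the recorded stack is reused */
			frame = frame->next_frame;
		}
	}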
Analyzed-by: Josh Poimboeuf
Reported-by: Vince Weaver
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: davej@codemonkey.org.uk
Cc: dvyukov@google.com
Cc: stable@vger.kernel.org
Fixes: ca037701a025 ("perf, x86: Add PEBS infrastructure")
Link: http://lkml.kernel.org/r/20161117171731.GV3157@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/ds.c   | 35 +++++++++++++++++++++++------------
 arch/x86/events/perf_event.h |  2 +-
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	}
 
 	/*
-	 * We use the interrupt regs as a base because the PEBS record
-	 * does not contain a full regs set, specifically it seems to
-	 * lack segment descriptors, which get used by things like
-	 * user_mode().
+	 * We use the interrupt regs as a base because the PEBS record does not
+	 * contain a full regs set, specifically it seems to lack segment
+	 * descriptors, which get used by things like user_mode().
 	 *
-	 * In the simple case fix up only the IP and BP,SP regs, for
-	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+	 *
+	 * We must however always use BP,SP from iregs for the unwinder to stay
+	 * sane; the record BP,SP can point into thin air when the record is
+	 * from a previous PMI context or an (I)RET happened between the record
+	 * and PMI.
 	 */
 	*regs = *iregs;
 	regs->flags = pebs->flags;
 	set_linear_ip(regs, pebs->ip);
-	regs->bp = pebs->bp;
-	regs->sp = pebs->sp;
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
 		regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
 		regs->dx = pebs->dx;
 		regs->si = pebs->si;
 		regs->di = pebs->di;
-		regs->bp = pebs->bp;
-		regs->sp = pebs->sp;
-		regs->flags = pebs->flags;
 
+		/*
+		 * Per the above; only set BP,SP if we don't need callchains.
+		 *
+		 * XXX: does this make sense?
+		 */
+		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+			regs->bp = pebs->bp;
+			regs->sp = pebs->sp;
+		}
+
+		/*
+		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
+		 */
+		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
 #ifndef CONFIG_X86_32
 		regs->r8 = pebs->r8;
 		regs->r9 = pebs->r9;

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..a77ee026643d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
  * Per register state.
  */
 struct er_account {
-	raw_spinlock_t lock;		/* per-core: protect structure */
+	raw_spinlock_t      lock;	/* per-core: protect structure */
 	u64                 config;	/* extra MSR config */
 	u64                 reg;	/* extra MSR number */
 	atomic_t            ref;	/* reference count */

From 033ac60c7f21f9996a0fab2fd04f334afbf77b33 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 18 Nov 2016 13:53:54 +0100
Subject: [PATCH 34/39] perf/x86/intel/uncore: Allow only a single PMU/box within an events group

Group validation expects all events to be of the same PMU; however,
is_uncore_event() is too wide: it matches _all_ uncore events, even
across PMUs.
This triggers failure when we group different events from different
uncore PMUs, like:

  perf stat -vv -e '{uncore_cbox_0/config=0x0334/,uncore_qpi_0/event=1/}' -a sleep 1

Fix is_uncore_event() by only matching events to the box at hand.

Note that generic code, run after this step, will disallow this mixture
of PMU events.

Reported-by: Jiri Olsa
Tested-by: Jiri Olsa
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Kan Liang
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: http://lkml.kernel.org/r/20161118125354.GQ3117@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/uncore.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
  */
 static int uncore_pmu_event_init(struct perf_event *event);
 
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
 {
-	return event->pmu->event_init == uncore_pmu_event_init;
+	return &box->pmu->pmu == event->pmu;
 }
 
 static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 
 	n = box->n_events;
 
-	if (is_uncore_event(leader)) {
+	if (is_box_event(box, leader)) {
 		box->event_list[n] = leader;
 		n++;
 	}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 		return n;
 
 	list_for_each_entry(event, &leader->sibling_list, group_entry) {
-		if (!is_uncore_event(event) ||
+		if (!is_box_event(box, event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;

From d55b352b01bc78fbc3d1bb650140668b87e58bf9 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 22 Nov 2016 21:50:52 +0100
Subject: [PATCH 35/39] NFSv4.x: hide array-bounds warning

A correct bugfix introduced a harmless warning that shows up with gcc-7:

  fs/nfs/callback.c: In function 'nfs_callback_up':
  fs/nfs/callback.c:214:14: error: array subscript is outside array bounds [-Werror=array-bounds]

What happens here is that the 'minorversion == 0' check tells the
compiler that we assume minorversion can be something other than 0, but
when CONFIG_NFS_V4_1 is disabled that would be invalid and result in an
out-of-bounds access.

The added check for IS_ENABLED(CONFIG_NFS_V4_1) tells gcc that this
really can't happen, which makes the code slightly smaller and also
avoids the warning.

The bugfix that introduced the warning is marked for stable backports;
we want this one backported to the same releases.
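For readers unfamiliar with the idiom: IS_ENABLED() expands to a
compile-time constant 1 or 0, so the disabled branch is still parsed and
type-checked but then discarded as dead code, together with any array
access the compiler could not prove to be in bounds. A minimal,
hypothetical illustration follows (CONFIG_FOO, handle_v0() and
handle_v1() are made-up names, not from this patch):

	/*
	 * With CONFIG_FOO unset this reads "if (1 || ...)", so the else
	 * branch (and whatever bounds problem gcc saw in it) is
	 * eliminated at compile time, yet it must still compile.
	 */
	if (!IS_ENABLED(CONFIG_FOO) || version == 0)
		ret = handle_v0(serv, net);
	else
		ret = handle_v1(serv, net);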
Fixes: 98b0f80c2396 ("NFSv4.x: Fix a refcount leak in nfs_callback_up_net")
Cc: stable@vger.kernel.org # v3.7+
Signed-off-by: Arnd Bergmann
Signed-off-by: Anna Schumaker
---
 fs/nfs/callback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 532d8e242d4d..484bebc20bca 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 	}
 
 	ret = -EPROTONOSUPPORT;
-	if (minorversion == 0)
+	if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
 		ret = nfs4_callback_up_net(serv, net);
 	else if (xprt->ops->bc_up)
 		ret = xprt->ops->bc_up(serv, net);

From e658a6f14d7c0243205f035979d0ecf6c12a036f Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Wed, 16 Nov 2016 11:18:05 -0500
Subject: [PATCH 36/39] tile: avoid using clocksource_cyc2ns with absolute cycle count

For large values of "mult" and long uptimes, the intermediate result of
"cycles * mult" can overflow 64 bits. For example, the tile platform
calls clocksource_cyc2ns with a 1.2 GHz clock; we have mult = 853, and
after 208.5 days, we overflow 64 bits.

Since clocksource_cyc2ns() is intended to be used for relative cycle
counts, not absolute cycle counts, performance is more important than
accepting a wider range of cycle values. So, just use mult_frac()
directly in tile's sched_clock().

Commit 4cecf6d401a0 ("sched, x86: Avoid unnecessary overflow in
sched_clock") by Salman Qazi results in essentially the same generated
code for x86 as this change does for tile. In fact, a follow-on change
by Salman introduced mult_frac() and switched to using it, so the C
code was largely identical at that point too.

Peter Zijlstra then added mul_u64_u32_shr() and switched x86 to use it.
This is, in principle, better; by optimizing the 64x64->64 multiplies
to be 32x32->64 multiplies we can potentially save some time. However,
the compiler pipelines the 64x64->64 multiplies pretty well, and the
conditional branch in the generic mul_u64_u32_shr() causes some bubbles
in execution, with the result that it's pretty much a wash. If tilegx
provided its own implementation of mul_u64_u32_shr() without the
conditional branch, we could potentially save 3 cycles, but that seems
like a small gain for a fair amount of additional build scaffolding; no
other platform currently provides a mul_u64_u32_shr() override, and
tile doesn't currently have a suitable header to put the override in.
Additionally, gcc currently has an optimization bug that prevents it
from recognizing the opportunity to use a 32x32->64 multiply, and so
the result would be no better than the existing mult_frac() until such
time as the compiler is fixed.

For now, just using mult_frac() seems like the right answer.
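To make the overflow concrete, a quick back-of-the-envelope check using
the figures above: 1.2e9 cycles/sec * 208.5 days * 86400 sec/day is
about 2.16e16 cycles, and multiplying that by mult = 853 gives roughly
1.84e19, which is just past 2^64 (also about 1.84e19). mult_frac()
avoids the wide intermediate product by dividing first; its definition
in include/linux/kernel.h is essentially the following (quoted from
memory, so treat as a sketch):

	/*
	 * Compute x * numer / denom without the overflow that the naive
	 * (x * numer) / denom would hit: split x into quotient and
	 * remainder by denom, scale each part, then recombine.
	 */
	#define mult_frac(x, numer, denom)(			\
	{							\
		typeof(x) quot = (x) / (denom);			\
		typeof(x) rem  = (x) % (denom);			\
		(quot * (numer)) + ((rem * (numer)) / (denom));	\
	}							\
	)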
Cc: stable@kernel.org [v3.4+]
Signed-off-by: Chris Metcalf
---
 arch/tile/kernel/time.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
  */
 unsigned long long sched_clock(void)
 {
-	return clocksource_cyc2ns(get_cycles(),
-				  sched_clock_mult, SCHED_CLOCK_SHIFT);
+	return mult_frac(get_cycles(),
+			 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)

From 2513940989fa2c56d0aeb4f5792d22804d92ab4c Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 25 Nov 2016 09:45:28 +1100
Subject: [PATCH 37/39] locking/selftest: Fix output since KERN_CONT changes

Since the KERN_CONT changes, the locking-selftest output is messed up,
e.g.:

  ----------------------------------------------------------------------------
                                 | spin |wlock |rlock |mutex | wsem | rsem |
  --------------------------------------------------------------------------
                     A-A deadlock:
  ok  |
  ok  |
  ok  |
  ok  |
  ok  |
  ok  |

Use pr_cont() to get it looking normal again:

  ----------------------------------------------------------------------------
                                 | spin |wlock |rlock |mutex | wsem | rsem |
  --------------------------------------------------------------------------
                     A-A deadlock:  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |

Reported-by: Christian Kujau
Signed-off-by: Michael Ellerman
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linuxppc-dev@ozlabs.org
Link: http://lkml.kernel.org/r/1480027528-934-1-git-send-email-mpe@ellerman.id.au
Signed-off-by: Ingo Molnar
---
 lib/locking-selftest.c | 66 +++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 872a15a2a637..f3a217ea0388 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -980,23 +980,23 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 #ifndef CONFIG_PROVE_LOCKING
 	if (expected == FAILURE && debug_locks) {
 		expected_testcase_failures++;
-		printk("failed|");
+		pr_cont("failed|");
 	}
 	else
 #endif
 	if (debug_locks != expected) {
 		unexpected_testcase_failures++;
-		printk("FAILED|");
+		pr_cont("FAILED|");
 		dump_stack();
 	} else {
 		testcase_successes++;
-		printk(" ok  |");
+		pr_cont(" ok  |");
 	}
 	testcase_total++;
 
 	if (debug_locks_verbose)
-		printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+		pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
 			lockclass_mask, debug_locks, expected);
 	/*
 	 * Some tests (e.g. double-unlock) might corrupt the preemption
@@ -1021,26 +1021,26 @@ static inline void print_testname(const char *testname)
 
 #define DO_TESTCASE_1(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_1B(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_3(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN);	\
 	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
 	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_3RW(desc, name, nr)				\
 	print_testname(desc"/"#nr);				\
 	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
 	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
 	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_6(desc, name)				\
 	print_testname(desc);					\
@@ -1050,7 +1050,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_6_SUCCESS(desc, name)			\
 	print_testname(desc);					\
@@ -1060,7 +1060,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 /*
  * 'read' variant: rlocks must not trigger.
 */
@@ -1073,7 +1073,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
-	printk("\n");
+	pr_cont("\n");
 
 #define DO_TESTCASE_2I(desc, name, nr)				\
 	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
@@ -1726,25 +1726,25 @@ static void ww_tests(void)
 	dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("ww contexts mixing");
 	dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("finishing ww context");
 	dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("locking mismatches");
 	dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("EDEADLK handling");
 	dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
@@ -1757,11 +1757,11 @@ static void ww_tests(void)
 	dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("spinlock nest unlocked");
 	dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	printk("  -----------------------------------------------------\n");
 	printk("                                 |block | try  |context|\n");
@@ -1771,25 +1771,25 @@ static void ww_tests(void)
 
 	dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("try");
 	dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("block");
 	dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("spinlock");
 	dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
 	dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
 	dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
-	printk("\n");
+	pr_cont("\n");
 }
 
 void locking_selftest(void)
@@ -1829,32 +1829,32 @@ void locking_selftest(void)
 	printk("  --------------------------------------------------------------------------\n");
 
 	print_testname("recursive read-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("recursive read-lock #2");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("mixed read-write-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	print_testname("mixed write-read-lock");
-	printk("             |");
+	pr_cont("             |");
 	dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
-	printk("             |");
+	pr_cont("             |");
 	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
-	printk("\n");
+	pr_cont("\n");
 
 	printk("  --------------------------------------------------------------------------\n");

From dbb26055defd03d59f678cb5f2c992abe05b064a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 30 Nov 2016 21:04:41 +0000
Subject: [PATCH 38/39] locking/rtmutex: Prevent dequeue vs. unlock race

David reported a futex/rtmutex state corruption. It's caused by the
following problem:

  CPU0		CPU1			CPU2
  l->owner=T1
		rt_mutex_lock(l)
		lock(l->wait_lock)
		l->owner = T1 | HAS_WAITERS;
		enqueue(T2)
		boost()
		unlock(l->wait_lock)
		schedule()

					rt_mutex_lock(l)
					lock(l->wait_lock)
					l->owner = T1 | HAS_WAITERS;
					enqueue(T3)
					boost()
					unlock(l->wait_lock)
					schedule()

		signal(->T2)		signal(->T3)
		lock(l->wait_lock)
		dequeue(T2)
		deboost()
		unlock(l->wait_lock)
					lock(l->wait_lock)
					dequeue(T3)
					 ===> wait list is now empty
					deboost()
					unlock(l->wait_lock)
		lock(l->wait_lock)
		fixup_rt_mutex_waiters()
		  if (wait_list_empty(l)) {
		    owner = l->owner & ~HAS_WAITERS;
		    l->owner = owner
		     ==> l->owner = T1
		  }

  lock(l->wait_lock)
  rt_mutex_unlock(l)	fixup_rt_mutex_waiters()
			  if (wait_list_empty(l)) {
			    owner = l->owner & ~HAS_WAITERS;
  cmpxchg(l->owner, T1, NULL)
   ===> Success (l->owner = NULL)

			    l->owner = owner
			     ==> l->owner = T1
			  }

That means the problem is caused by fixup_rt_mutex_waiters() which does
the RMW to clear the waiters bit unconditionally when there are no
waiters in the rtmutex's rbtree.

This can be fatal: A concurrent unlock can release the rtmutex in the
fastpath because the waiters bit is not set.
If the cmpxchg() gets in the middle of the RMW operation, then the
previous owner, which just unlocked the rtmutex, is set as the owner
again when the write takes place after the successful cmpxchg().

The solution is rather trivial: verify that the owner member of the
rtmutex has the waiters bit set before clearing it. This does not
require a cmpxchg() or other atomic operations because the waiters bit
can only be set and cleared with the rtmutex wait_lock held. It's also
safe against the fast path unlock attempt. The unlock attempt via
cmpxchg() will either see the bit set and take the slowpath, or see the
bit cleared and release it atomically in the fastpath.

It's remarkable that the test program provided by David triggers on
ARM64 and MIPS64 really quickly, but it refuses to reproduce on x86-64,
while the problem exists there as well. That refusal might explain why
this was not discovered earlier despite the bug existing from day one
of the rtmutex implementation, more than 10 years ago.

Thanks to David for meticulously instrumenting the code and providing
the information which allowed this subtle problem to be decoded.

Reported-by: David Daney
Tested-by: David Daney
Signed-off-by: Thomas Gleixner
Reviewed-by: Steven Rostedt
Acked-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Will Deacon
Cc: stable@vger.kernel.org
Fixes: 23f78d4a03c5 ("[PATCH] pi-futex: rt mutex core")
Link: http://lkml.kernel.org/r/20161130210030.351136722@linutronix.de
Signed-off-by: Ingo Molnar
---
 kernel/locking/rtmutex.c | 68 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 1ec0f48962b3..2c49d76f96c3 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0	CPU 1		CPU2
+	 * l->owner=T1
+	 *		rt_mutex_lock(l)
+	 *		lock(l->lock)
+	 *		l->owner = T1 | HAS_WAITERS;
+	 *		enqueue(T2)
+	 *		boost()
+	 *		  unlock(l->lock)
+	 *		block()
+	 *
+	 *				rt_mutex_lock(l)
+	 *				lock(l->lock)
+	 *				l->owner = T1 | HAS_WAITERS;
+	 *				enqueue(T3)
+	 *				boost()
+	 *				  unlock(l->lock)
+	 *				block()
+	 *		signal(->T2)	signal(->T3)
+	 *		lock(l->lock)
+	 *		dequeue(T2)
+	 *		deboost()
+	 *		  unlock(l->lock)
+	 *				lock(l->lock)
+	 *				dequeue(T3)
+	 *				 ==> wait list is empty
+	 *				deboost()
+	 *				  unlock(l->lock)
+	 *		lock(l->lock)
+	 *		fixup_rt_mutex_waiters()
+	 *		  if (wait_list_empty(l) {
+	 *		    owner = l->owner & ~HAS_WAITERS;
+	 *		    l->owner = owner
+	 *		      ==> l->owner = T1
+	 *		  }
+	 * lock(l->lock)
+	 * rt_mutex_unlock(l)	fixup_rt_mutex_waiters()
+	 *			  if (wait_list_empty(l) {
+	 *			    owner = l->owner & ~HAS_WAITERS;
+	 * cmpxchg(l->owner, T1, NULL)
+	 *  ===> Success (l->owner = NULL)
+	 *
+	 *			    l->owner = owner
+	 *			      ==> l->owner = T1
+	 *			  }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*

From 1be5d4fa0af34fb7bafa205aeb59f5c7cc7a089d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 30 Nov 2016 21:04:42 +0000
Subject: [PATCH 39/39] locking/rtmutex: Use READ_ONCE() in rt_mutex_owner()

While debugging the rtmutex unlock vs. dequeue race, Will suggested
using READ_ONCE() in rt_mutex_owner(), as it might race against the
cmpxchg_release() in unlock_rt_mutex_safe().

Will:

 "It's a minor thing which will most likely not matter in practice"

A careful search did not unearth an actual problem in today's code, but
it's better to be safe than surprised.

Suggested-by: Will Deacon
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Cc: David Daney
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc:
Link: http://lkml.kernel.org/r/20161130210030.431379999@linutronix.de
Signed-off-by: Ingo Molnar
---
 kernel/locking/rtmutex_common.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 4f5f83c7d2d3..e317e1cbb3eb 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
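As a closing note on the class of bug this guards against, here is a
hypothetical sketch (not kernel code; owner_word and the 0x3UL mask are
stand-ins for lock->owner and RT_MUTEX_OWNER_MASKALL): with a plain
load, the compiler may legally re-read the variable for every use, so
two uses of the "same" value can disagree when a concurrent
cmpxchg_release() rewrites the word in between. A volatile access pins
the value to exactly one load:

	/* Hypothetical sketch of the refetch hazard READ_ONCE() closes. */
	#define READ_ONCE_SKETCH(x) (*(volatile typeof(x) *)&(x))

	static unsigned long owner_word;	/* stands in for lock->owner */

	static unsigned long owner_snapshot(void)
	{
		/* exactly one memory read; all uses see the same snapshot */
		unsigned long owner = READ_ONCE_SKETCH(owner_word);

		return owner & ~0x3UL;		/* mask the low flag bits */
	}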