diff --git a/config/boards/cubox-i.conf b/config/boards/cubox-i.conf index 1e67aad23..fac2a71e3 100644 --- a/config/boards/cubox-i.conf +++ b/config/boards/cubox-i.conf @@ -4,3 +4,4 @@ BOARDFAMILY="imx6" BOOTCONFIG="mx6cuboxi_defconfig" KERNEL_TARGET="current" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/firefly-rk3399.csc b/config/boards/firefly-rk3399.csc index 5a85a09bd..1811a0670 100644 --- a/config/boards/firefly-rk3399.csc +++ b/config/boards/firefly-rk3399.csc @@ -4,3 +4,4 @@ BOARDFAMILY="rk3399" BOOTCONFIG="firefly-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/kvim1.csc b/config/boards/kvim1.csc index 1f3685da4..ff8ab7f69 100644 --- a/config/boards/kvim1.csc +++ b/config/boards/kvim1.csc @@ -4,3 +4,4 @@ BOARDFAMILY="meson-gxl" BOOTCONFIG="khadas-vim_defconfig" KERNEL_TARGET="current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/lafrite.conf b/config/boards/lafrite.conf index e43535976..09764a6e9 100644 --- a/config/boards/lafrite.conf +++ b/config/boards/lafrite.conf @@ -6,3 +6,4 @@ BOOT_FDT_FILE="amlogic/meson-gxl-s805x-libretech-ac.dtb" KERNEL_TARGET="current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.mesongx" +BOOT_LOGO="desktop" diff --git a/config/boards/lepotato.conf b/config/boards/lepotato.conf index 71ec061bd..9f889f94b 100644 --- a/config/boards/lepotato.conf +++ b/config/boards/lepotato.conf @@ -6,3 +6,4 @@ KERNEL_TARGET="current,dev" SERIALCON="ttyAML0" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.mesongx" +BOOT_LOGO="desktop" diff --git a/config/boards/nanopct4.conf b/config/boards/nanopct4.conf index bea22d363..2a6cc1bbe 100644 --- a/config/boards/nanopct4.conf +++ b/config/boards/nanopct4.conf @@ -5,3 +5,4 @@ BOOTCONFIG="nanopc-t4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.rt5651" +BOOT_LOGO="desktop" diff --git a/config/boards/nanopik2-s905.conf b/config/boards/nanopik2-s905.conf index aa57f8f64..74bf8920f 100644 --- a/config/boards/nanopik2-s905.conf +++ b/config/boards/nanopik2-s905.conf @@ -5,3 +5,4 @@ BOOTCONFIG="nanopi-k2_defconfig" KERNEL_TARGET="current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.mesongx" +BOOT_LOGO="desktop" diff --git a/config/boards/nanopim4.conf b/config/boards/nanopim4.conf index 130c789c1..3e24406c7 100644 --- a/config/boards/nanopim4.conf +++ b/config/boards/nanopim4.conf @@ -5,3 +5,4 @@ BOOTCONFIG="nanopi-m4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.rt5651" +BOOT_LOGO="desktop" diff --git a/config/boards/nanopim4v2.conf b/config/boards/nanopim4v2.conf index f55397590..bff4a900e 100644 --- a/config/boards/nanopim4v2.conf +++ b/config/boards/nanopim4v2.conf @@ -5,3 +5,4 @@ BOOTCONFIG="nanopi-m4v2-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.rt5651" +BOOT_LOGO="desktop" diff --git a/config/boards/nanopineo4.conf b/config/boards/nanopineo4.conf index 5ab03db84..415b6d534 100644 --- a/config/boards/nanopineo4.conf +++ b/config/boards/nanopineo4.conf @@ -4,3 +4,4 @@ BOARDFAMILY="rk3399" BOOTCONFIG="nanopi-neo4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/odroidc4.conf b/config/boards/odroidc4.conf index 0793e62d8..3373ea8d2 100644 --- a/config/boards/odroidc4.conf +++ b/config/boards/odroidc4.conf @@ -7,3 +7,4 @@ FULL_DESKTOP="yes" SERIALCON="ttyAML0" 
ASOUND_STATE="asound.state.meson64" FORCE_BOOTSCRIPT_UPDATE="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/odroidn2.conf b/config/boards/odroidn2.conf index 9a5ec2d96..531769f51 100644 --- a/config/boards/odroidn2.conf +++ b/config/boards/odroidn2.conf @@ -7,3 +7,4 @@ KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" FORCE_BOOTSCRIPT_UPDATE="yes" ASOUND_STATE="asound.state.meson64" +BOOT_LOGO="desktop" diff --git a/config/boards/orangepi-rk3399.conf b/config/boards/orangepi-rk3399.conf index 7b6098acd..7e7fc55d5 100644 --- a/config/boards/orangepi-rk3399.conf +++ b/config/boards/orangepi-rk3399.conf @@ -4,3 +4,4 @@ BOARDFAMILY="rk3399" BOOTCONFIG="orangepi-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/orangepi4.conf b/config/boards/orangepi4.conf index 147bab382..4d7c3930d 100644 --- a/config/boards/orangepi4.conf +++ b/config/boards/orangepi4.conf @@ -5,3 +5,4 @@ BOOTCONFIG="orangepi-4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" ASOUND_STATE="asound.state.rt5651" +BOOT_LOGO="desktop" diff --git a/config/boards/pinebook-a64.conf b/config/boards/pinebook-a64.conf index b50d4af82..1129a2906 100644 --- a/config/boards/pinebook-a64.conf +++ b/config/boards/pinebook-a64.conf @@ -6,3 +6,4 @@ DESKTOP_AUTOLOGIN="no" KERNEL_TARGET="current,dev" FULL_DESKTOP="yes" PACKAGE_LIST_DESKTOP_BOARD="xfce4-power-manager" +BOOT_LOGO="desktop" diff --git a/config/boards/pinebook-pro.wip b/config/boards/pinebook-pro.wip index 49facd578..12bdbc4cf 100644 --- a/config/boards/pinebook-pro.wip +++ b/config/boards/pinebook-pro.wip @@ -6,3 +6,4 @@ BOOT_FDT_FILE="rockchip/rk3399-pinebook-pro.dtb" DESKTOP_AUTOLOGIN="no" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/roc-rk3399-pc.csc b/config/boards/roc-rk3399-pc.csc index e50152338..935192302 100644 --- a/config/boards/roc-rk3399-pc.csc +++ b/config/boards/roc-rk3399-pc.csc @@ -4,3 +4,4 @@ BOARDFAMILY="rk3399" BOOTCONFIG="roc-pc-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/rockpi-4a.conf b/config/boards/rockpi-4a.conf index e786f5ccb..2b01d7020 100644 --- a/config/boards/rockpi-4a.conf +++ b/config/boards/rockpi-4a.conf @@ -4,3 +4,4 @@ BOARDFAMILY="rockchip64" BOOTCONFIG="rock-pi-4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/rockpi-4b.conf b/config/boards/rockpi-4b.conf index bae61e24d..6b8207b1a 100644 --- a/config/boards/rockpi-4b.conf +++ b/config/boards/rockpi-4b.conf @@ -4,3 +4,4 @@ BOARDFAMILY="rockchip64" BOOTCONFIG="rock-pi-4-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/rockpro64.conf b/config/boards/rockpro64.conf index d5a1cabc5..cb9817900 100644 --- a/config/boards/rockpro64.conf +++ b/config/boards/rockpro64.conf @@ -4,3 +4,4 @@ BOARDFAMILY="rockchip64" BOOTCONFIG="rockpro64-rk3399_defconfig" KERNEL_TARGET="legacy,current,dev" FULL_DESKTOP="yes" +BOOT_LOGO="desktop" diff --git a/config/boards/teres-a64.conf b/config/boards/teres-a64.conf index 314585739..de22ccf50 100644 --- a/config/boards/teres-a64.conf +++ b/config/boards/teres-a64.conf @@ -6,3 +6,4 @@ DESKTOP_AUTOLOGIN="no" KERNEL_TARGET="current,dev" FULL_DESKTOP="yes" PACKAGE_LIST_DESKTOP_BOARD="xfce4-power-manager" +BOOT_LOGO="desktop" diff --git a/config/bootenv/clearfog-default.txt b/config/bootenv/clearfog.txt 
similarity index 100% rename from config/bootenv/clearfog-default.txt rename to config/bootenv/clearfog.txt diff --git a/config/bootenv/cubox-default.txt b/config/bootenv/cubox.txt similarity index 100% rename from config/bootenv/cubox-default.txt rename to config/bootenv/cubox.txt diff --git a/config/bootenv/helios4-default.txt b/config/bootenv/helios4.txt similarity index 100% rename from config/bootenv/helios4-default.txt rename to config/bootenv/helios4.txt diff --git a/config/bootenv/imx7d-default.txt b/config/bootenv/imx7d.txt similarity index 100% rename from config/bootenv/imx7d-default.txt rename to config/bootenv/imx7d.txt diff --git a/config/bootenv/meson64-next.txt b/config/bootenv/meson.txt similarity index 76% rename from config/bootenv/meson64-next.txt rename to config/bootenv/meson.txt index 4a79dcaa8..a522d9923 100644 --- a/config/bootenv/meson64-next.txt +++ b/config/bootenv/meson.txt @@ -1,6 +1,3 @@ verbosity=1 -logo=disabled console=both ethaddr=00:50:43:84:fb:2f - - diff --git a/config/bootenv/mt7623-default.txt b/config/bootenv/mt7623.txt similarity index 100% rename from config/bootenv/mt7623-default.txt rename to config/bootenv/mt7623.txt diff --git a/config/bootenv/pine64-default.txt b/config/bootenv/pine64-default.txt deleted file mode 100644 index 5ba1cc5f1..000000000 --- a/config/bootenv/pine64-default.txt +++ /dev/null @@ -1,5 +0,0 @@ -verbosity=1 -console=both -disp_mode=720p60 -camera_type=none -pine64_lcd=off diff --git a/config/bootenv/rk322x-default.txt b/config/bootenv/rk322x.txt similarity index 100% rename from config/bootenv/rk322x-default.txt rename to config/bootenv/rk322x.txt diff --git a/config/bootenv/odroidc2-next.txt b/config/bootenv/rockchip.txt similarity index 100% rename from config/bootenv/odroidc2-next.txt rename to config/bootenv/rockchip.txt diff --git a/config/bootenv/s5p6818-default.txt b/config/bootenv/s5p6818-default.txt deleted file mode 100644 index af16115c0..000000000 --- a/config/bootenv/s5p6818-default.txt +++ /dev/null @@ -1,2 +0,0 @@ -verbosity=1 -console=both diff --git a/config/bootenv/odroidxu4-default.txt b/config/bootenv/s5p6818.txt similarity index 100% rename from config/bootenv/odroidxu4-default.txt rename to config/bootenv/s5p6818.txt diff --git a/config/bootenv/sun50iw1-next.txt b/config/bootenv/sun50iw1-next.txt deleted file mode 100644 index b9bafac33..000000000 --- a/config/bootenv/sun50iw1-next.txt +++ /dev/null @@ -1 +0,0 @@ -verbosity=1 diff --git a/config/bootenv/sun50iw2-next.txt b/config/bootenv/sun50iw2-next.txt deleted file mode 100644 index af16115c0..000000000 --- a/config/bootenv/sun50iw2-next.txt +++ /dev/null @@ -1,2 +0,0 @@ -verbosity=1 -console=both diff --git a/config/bootenv/sun8i-default.txt b/config/bootenv/sun8i-default.txt deleted file mode 100644 index af16115c0..000000000 --- a/config/bootenv/sun8i-default.txt +++ /dev/null @@ -1,2 +0,0 @@ -verbosity=1 -console=both diff --git a/config/bootenv/sunxi-default.txt b/config/bootenv/sunxi.txt similarity index 100% rename from config/bootenv/sunxi-default.txt rename to config/bootenv/sunxi.txt diff --git a/config/bootenv/udoo-default.txt b/config/bootenv/udoo-default.txt deleted file mode 100644 index b9bafac33..000000000 --- a/config/bootenv/udoo-default.txt +++ /dev/null @@ -1 +0,0 @@ -verbosity=1 diff --git a/config/bootenv/rockchip-default.txt b/config/bootenv/udoo.txt similarity index 100% rename from config/bootenv/rockchip-default.txt rename to config/bootenv/udoo.txt diff --git a/config/bootscripts/boot-cubox.cmd 
b/config/bootscripts/boot-cubox.cmd index 588a8ad7e..6a7dbe979 100644 --- a/config/bootscripts/boot-cubox.cmd +++ b/config/bootscripts/boot-cubox.cmd @@ -7,6 +7,7 @@ setenv rootdev "/dev/mmcblk0p1" setenv verbosity "1" setenv console "display" +setenv bootlogo "false" setenv rootfstype "ext4" setenv disp_mode "1920x1080m60" setenv earlycon "off" @@ -29,6 +30,7 @@ fi if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "${consoleargs} console=ttymxc0,115200"; fi if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootargs "root=${rootdev} rootfstype=${rootfstype} rootwait ${consoleargs} consoleblank=0 video=mxcfb0:dev=hdmi,${disp_mode},if=RGB24,bpp=32 coherent_pool=2M cma=256M@2G rd.dm=0 rd.luks=0 rd.lvm=0 raid=noautodetect pci=nomsi vt.global_cursor_default=0 loglevel=${verbosity} usb-storage.quirks=${usbstoragequirks} ${extraargs}" ext2load mmc 0 ${fdt_addr} /boot/dtb/${fdt_file} || fatload mmc 0 ${fdt_addr} /dtb/${fdt_file} || ext2load mmc 0 ${fdt_addr} /dtb/${fdt_file} diff --git a/config/bootscripts/boot-meson64.cmd b/config/bootscripts/boot-meson64.cmd index 2cdbefdf0..e7c5b6c6e 100644 --- a/config/bootscripts/boot-meson64.cmd +++ b/config/bootscripts/boot-meson64.cmd @@ -10,6 +10,7 @@ setenv overlay_error "false" setenv rootdev "/dev/mmcblk1p1" setenv verbosity "1" setenv console "both" +setenv bootlogo "false" setenv rootfstype "ext4" setenv docker_optimizations "on" @@ -27,6 +28,7 @@ if test "${console}" = "serial"; then setenv consoleargs "console=ttyAML0,115200 if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=ttyAML0,115200 console=tty1"; fi if test "${console}" = "serial"; then setenv consoleargs "console=ttyAML0,115200"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootargs "root=${rootdev} rootwait rootfstype=${rootfstype} ${consoleargs} consoleblank=0 coherent_pool=2M loglevel=${verbosity} ubootpart=${partuuid} usb-storage.quirks=${usbstoragequirks} ${extraargs} ${extraboardargs}" diff --git a/config/bootscripts/boot-mvebu-next.cmd b/config/bootscripts/boot-mvebu.cmd similarity index 100% rename from config/bootscripts/boot-mvebu-next.cmd rename to config/bootscripts/boot-mvebu.cmd diff --git a/config/bootscripts/boot-odroid-c2.ini b/config/bootscripts/boot-odroid-c2.ini index d5959ad1b..53b98c2b0 100644 --- a/config/bootscripts/boot-odroid-c2.ini +++ b/config/bootscripts/boot-odroid-c2.ini @@ -89,6 +89,9 @@ setenv m_bpp "24" # setenv vout "dvi" # setenv vout "vga" +# Display splash logo +setenv bootlogo "false" + # HDMI HotPlug Detection control # Allows you to force HDMI thinking that the cable is connected. 
# true = HDMI will believe that cable is always connected @@ -155,8 +158,10 @@ if test -e mmc 0:1 boot/.next; then setenv condev "console=ttyAML0,115200n8"; el ########################################### setenv verbosity "1" +if test "${bootlogo}" = "true"; then setenv bootsplash "bootsplash.bootfile=bootsplash.armbian"; fi + # Boot arguments -setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} no_console_suspend consoleblank=0 hdmimode=${m} m_bpp=${m_bpp} vout=${vout} fsck.repair=yes loglevel=${verbosity} net.ifnames=0 ${extraargs}" +setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} no_console_suspend consoleblank=0 hdmimode=${m} m_bpp=${m_bpp} vout=${vout} fsck.repair=yes loglevel=${verbosity} ${bootsplash} net.ifnames=0 ${extraargs}" # Mainline boot arguments if test -e mmc 0:1 boot/.next; then setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} no_console_suspend consoleblank=0 fsck.repair=yes loglevel=${verbosity} net.ifnames=0 ${extraargs}"; fi diff --git a/config/bootscripts/boot-odroid-c4.ini b/config/bootscripts/boot-odroid-c4.ini index 9c96191c7..3c91c5c98 100644 --- a/config/bootscripts/boot-odroid-c4.ini +++ b/config/bootscripts/boot-odroid-c4.ini @@ -90,6 +90,12 @@ setenv max_freq_a55 "1908" # 1.908 GHz, default value # max cpu-cores setenv maxcpus "4" +# Display splash logo +setenv bootlogo "false" + +if test "${bootlogo}" = "true"; then setenv bootsplash "bootsplash.bootfile=bootsplash.armbian"; fi + + ### Normal HDMI Monitors if test "${display_autodetect}" = "true"; then hdmitx edid; fi if test "${hdmimode}" = "custombuilt"; then setenv cmode "modeline=${modeline}"; fi @@ -98,8 +104,11 @@ if test "${cec}" = "true"; then setenv cec_enable "hdmitx=cec3f"; fi # VU7 Settings if test "${disable_vu7}" = "false"; then setenv hid_quirks "usbhid.quirks=0x0eef:0x0005:0x0004"; fi +# load armbianEnv.txt +if ext4load mmc ${devno}:1 0x44000000 /boot/armbianEnv.txt || fatload mmc ${devno}:1 0x44000000 armbianEnv.txt || ext4load mmc ${devno}:1 0x44000000 armbianEnv.txt; then env import -t 0x44000000 ${filesize}; fi + # Boot Args -setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} cvbscable=${cvbscable} overscan=${overscan} ${hid_quirks} monitor_onoff=${monitor_onoff} logo=osd0,loaded ${cec_enable} sdrmode=${sdrmode}" +setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} cvbscable=${cvbscable} overscan=${overscan} ${hid_quirks} monitor_onoff=${monitor_onoff} logo=osd0,loaded ${cec_enable} ${bootsplash} sdrmode=${sdrmode}" # Set load addresses setenv dtb_loadaddr "0x1000000" diff --git a/config/bootscripts/boot-odroid-n2-mainline.ini b/config/bootscripts/boot-odroid-n2-mainline.ini index 137b7348f..6b344b2da 100644 --- a/config/bootscripts/boot-odroid-n2-mainline.ini +++ b/config/bootscripts/boot-odroid-n2-mainline.ini @@ -87,12 +87,17 @@ setenv max_freq_a53 "1896" # 1.896 GHz, default value # setenv maxcpus "5" setenv maxcpus "6" +# Bootlogo +setenv bootlogo 
"false" + ### Normal HDMI Monitors if test "${display_autodetect}" = "true"; then hdmitx edid; fi if test "${hdmimode}" = "custombuilt"; then setenv cmode "modeline=${modeline}"; fi +if ext4load mmc ${devno}:1 0x44000000 /boot/armbianEnv.txt || fatload mmc ${devno}:1 0x44000000 armbianEnv.txt || ext4load mmc ${devno}:1 0x44000000 armbianEnv.txt; then env import -t 0x44000000 ${filesize}; fi # Boot Args -setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw clk_ignore_unused rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs max_freq_a53=${max_freq_a53} max_freq_a73=${max_freq_a73} maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} cvbscable=${cvbscable} overscan=${overscan}" +if test "${bootlogo}" = "true"; then setenv bootsplash "bootsplash.bootfile=bootsplash.armbian"; fi +setenv bootargs "root=${rootdev} rootwait ${bootsplash} rootflags=data=writeback rw clk_ignore_unused rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs max_freq_a53=${max_freq_a53} max_freq_a73=${max_freq_a73} maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} cvbscable=${cvbscable} overscan=${overscan}" # Set load addresses setenv dtb_loadaddr "0x1000000" diff --git a/config/bootscripts/boot-odroid-n2.ini b/config/bootscripts/boot-odroid-n2.ini index 4bae9df8c..84b67b736 100644 --- a/config/bootscripts/boot-odroid-n2.ini +++ b/config/bootscripts/boot-odroid-n2.ini @@ -5,6 +5,9 @@ setenv rootfstype "ext4" ODROIDN2-UBOOT-CONFIG +# Display splash logo +setenv bootlogo "false" + # Default Console Device Setting setenv condev "console=ttyS0,115200n8 console=tty1 loglevel=1" # on both @@ -89,10 +92,14 @@ setenv maxcpus "6" ### Normal HDMI Monitors if test "${display_autodetect}" = "true"; then hdmitx edid; fi +if test "${bootlogo}" = "true"; then setenv bootsplash "bootsplash.bootfile=bootsplash.armbian"; fi if test "${hdmimode}" = "custombuilt"; then setenv cmode "modeline=${modeline}"; fi +# Read armbianEnv.txt +if ext4load mmc ${devno}:1 0x44000000 /boot/armbianEnv.txt || fatload mmc ${devno}:1 0x44000000 armbianEnv.txt || ext4load mmc ${devno}:1 0x44000000 armbianEnv.txt; then env import -t 0x44000000 ${filesize}; fi + # Boot Args -setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs max_freq_a53=${max_freq_a53} max_freq_a73=${max_freq_a73} maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} cvbscable=${cvbscable} overscan=${overscan}" +setenv bootargs "root=${rootdev} rootwait rootflags=data=writeback rw rootfstype=${rootfstype} ${condev} ${amlogic} no_console_suspend fsck.repair=yes net.ifnames=0 elevator=noop hdmimode=${hdmimode} cvbsmode=576cvbs max_freq_a53=${max_freq_a53} max_freq_a73=${max_freq_a73} maxcpus=${maxcpus} voutmode=${voutmode} ${cmode} disablehpd=${disablehpd} ${bootsplash} cvbscable=${cvbscable} overscan=${overscan}" # Set load addresses setenv dtb_loadaddr "0x1000000" diff --git a/config/bootscripts/boot-odroid-xu4-default.ini b/config/bootscripts/boot-odroid-xu4.ini similarity index 98% rename from config/bootscripts/boot-odroid-xu4-default.ini rename to config/bootscripts/boot-odroid-xu4.ini index 7d8552668..6f730e317 100644 --- 
a/config/bootscripts/boot-odroid-xu4-default.ini +++ b/config/bootscripts/boot-odroid-xu4.ini @@ -14,15 +14,15 @@ setenv macaddr "00:1e:06:61:7a:55" setenv rootdev "/dev/mmcblk0p1" setenv rootfstype "ext4" setenv console "both" +setenv bootlogo "false" setenv verbosity "1" # To update boot loader on your eMMC use the nand-sata-install tool # run copy_uboot_sd2emmc -if ext4load mmc 0:1 0x44000000 /boot/armbianEnv.txt || fatload mmc 0:1 0x44000000 armbianEnv.txt || ext4load mmc 0:1 0x44000000 armbianEnv.txt; then env import -t 0x44000000 ${filesize}; fi - if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "console=ttySAC2,115200n8 ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootrootfs "${consoleargs} consoleblank=0 loglevel=${verbosity} root=${rootdev} rootfstype=${rootfstype} rootwait rw" diff --git a/config/bootscripts/boot-pine64-default.cmd b/config/bootscripts/boot-pine64-default.cmd deleted file mode 100644 index c98ee9cfb..000000000 --- a/config/bootscripts/boot-pine64-default.cmd +++ /dev/null @@ -1,118 +0,0 @@ -# DO NOT EDIT THIS FILE -# -# Please edit /boot/armbianEnv.txt to set supported parameters -# - -# default values - -setenv rootdev "/dev/mmcblk0p1" -setenv verbosity "1" -setenv console "both" -setenv disp_mem_reserves "off" -setenv disp_mode "720p60" -setenv rootfstype "ext4" -setenv camera_type "none" -setenv pine64_lcd "off" - -if test -e mmc ${boot_part} ${prefix}armbianEnv.txt; then - load mmc ${boot_part} ${load_addr} ${prefix}armbianEnv.txt - env import -t ${load_addr} ${filesize} -fi - -if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi -if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "${consoleargs} console=ttyS0,115200n8"; fi - -setenv bootargs "root=${rootdev} rootfstype=${rootfstype} rootwait ${consoleargs} no_console_suspend earlycon=uart,mmio32,0x01c28000 mac_addr=${ethaddr} consoleblank=0 loglevel=${verbosity} ${extraargs} ${extraboardargs}" - -# determine board type from DT compiled into u-boot binary, currently SoPine is not autodetected -fdt get value dt_name / dt-name -if test "${dt_name}" = "sun50iw1p1-pine64so"; then - setenv pine64_model "pine64so" -elif test "${dt_name}" = "sun50iw1p1-orangepiwin"; then - setenv pine64_model "orangepiwin" -elif test "${dt_name}" = "sun50iw1p1-bananapim64"; then - setenv pine64_model "bananapim64" -elif test "${dt_name}" = "sun50iw1p1-olinuxino-a64"; then - setenv pine64_model "olinuxino-a64" -fi - -load mmc ${boot_part} ${fdt_addr} ${prefix}dtb/sun50iw1p1-${pine64_model}.dtb -load mmc ${boot_part} ${initrd_addr} ${prefix}uInitrd -load mmc ${boot_part} ${kernel_addr} ${prefix}Image - -fdt addr ${fdt_addr} -fdt resize - -# set display resolution from uEnv.txt or other environment file -# default to 720p60 -if test "${disp_mode}" = "480i"; then setenv fdt_disp_mode "<0x00000000>" -elif test "${disp_mode}" = "576i"; then setenv fdt_disp_mode "<0x00000001>" -elif test "${disp_mode}" = "480p"; then setenv fdt_disp_mode "<0x00000002>" -elif test "${disp_mode}" = "576p"; then setenv fdt_disp_mode "<0x00000003>" -elif test "${disp_mode}" = "720p50"; then setenv fdt_disp_mode "<0x00000004>" -elif test "${disp_mode}" = "720p60"; then setenv fdt_disp_mode "<0x00000005>" -elif test "${disp_mode}" = 
"1080i50"; then setenv fdt_disp_mode "<0x00000006>" -elif test "${disp_mode}" = "1080i60"; then setenv fdt_disp_mode "<0x00000007>" -elif test "${disp_mode}" = "1080p24"; then setenv fdt_disp_mode "<0x00000008>" -elif test "${disp_mode}" = "1080p50"; then setenv fdt_disp_mode "<0x00000009>" -elif test "${disp_mode}" = "1080p60"; then setenv fdt_disp_mode "<0x0000000a>" -elif test "${disp_mode}" = "2160p30"; then setenv fdt_disp_mode "<0x0000001c>" -elif test "${disp_mode}" = "2160p25"; then setenv fdt_disp_mode "<0x0000001d>" -elif test "${disp_mode}" = "2160p24"; then setenv fdt_disp_mode "<0x0000001e>" -else setenv fdt_disp_mode "<0x00000005>" -fi - -if test "${pine64_lcd}" = "1" || test "${pine64_lcd}" = "on"; then - fdt set /soc@01c00000/disp@01000000 screen0_output_type "<0x00000001>" - fdt set /soc@01c00000/disp@01000000 screen0_output_mode "<0x00000004>" - fdt set /soc@01c00000/disp@01000000 screen1_output_mode ${fdt_disp_mode} - - fdt set /soc@01c00000/lcd0@01c0c000 lcd_used "<0x00000001>" - - fdt set /soc@01c00000/boot_disp output_type "<0x00000001>" - fdt set /soc@01c00000/boot_disp output_mode "<0x00000004>" - - fdt set /soc@01c00000/ctp status "okay" - fdt set /soc@01c00000/ctp ctp_used "<0x00000001>" - fdt set /soc@01c00000/ctp ctp_name "gt911_DB2" -elif test "${pine64_model}" != "pine64-pinebook"; then - fdt set /soc@01c00000/disp@01000000 screen0_output_mode ${fdt_disp_mode} -fi - -# DVI compatibility -if test "${disp_dvi_compat}" = "1" || test "${disp_dvi_compat}" = "on"; then - fdt set /soc@01c00000/hdmi@01ee0000 hdmi_hdcp_enable "<0x00000000>" - fdt set /soc@01c00000/hdmi@01ee0000 hdmi_cts_compatibility "<0x00000001>" -fi - -if test "${disp_mem_reserves}" = "off"; then - # TODO: Remove reserved memory from DT or disable devices? -fi - -# default, only set status -if test "${camera_type}" = "s5k4ec"; then - fdt set /soc@01c00000/vfe@0/ status "okay" - fdt set /soc@01c00000/vfe@0/dev@0/ status "okay" -fi - -# change name, i2c address and vdd voltage -if test "${camera_type}" = "ov5640"; then - fdt set /soc@01c00000/vfe@0/dev@0/ csi0_dev0_mname "ov5640" - fdt set /soc@01c00000/vfe@0/dev@0/ csi0_dev0_twi_addr "<0x00000078>" - fdt set /soc@01c00000/vfe@0/dev@0/ csi0_dev0_iovdd_vol "<0x001b7740>" - fdt set /soc@01c00000/vfe@0/ status "okay" - fdt set /soc@01c00000/vfe@0/dev@0/ status "okay" -fi - -# GMAC TX/RX delay processing -if test -n "${gmac-tx-delay}"; then - fdt set /soc@01c00000/eth@01c30000/ tx-delay "<0x${gmac-tx-delay}>" -fi -if test -n "${gmac-rx-delay}"; then - fdt set /soc@01c00000/eth@01c30000/ rx-delay "<0x${gmac-rx-delay}>" -fi - -booti ${kernel_addr} ${initrd_addr} ${fdt_addr} - -# Recompile with: -# mkimage -C none -A arm -T script -d /boot/boot.cmd /boot/boot.scr diff --git a/config/bootscripts/boot-rk322x.cmd b/config/bootscripts/boot-rk322x.cmd index 68ac3b55e..0f144f52e 100644 --- a/config/bootscripts/boot-rk322x.cmd +++ b/config/bootscripts/boot-rk322x.cmd @@ -10,6 +10,7 @@ setenv overlay_error "false" setenv rootdev "/dev/mmcblk0p1" setenv verbosity "1" setenv console "both" +setenv bootlogo "false" setenv rootfstype "ext4" setenv docker_optimizations "on" @@ -24,6 +25,7 @@ if test "${logo}" = "disabled"; then setenv logo "logo.nologo"; fi # get PARTUUID of first partition on SD/eMMC the boot script was loaded from if test "${devtype}" = "mmc"; then part uuid mmc ${devnum}:1 partuuid; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootargs "earlyprintk root=${rootdev} 
console=ttyS2,115200n8 console=tty1 rootwait rootfstype=${rootfstype} ${consoleargs} consoleblank=0 loglevel=${verbosity} ubootpart=${partuuid} usb-storage.quirks=${usbstoragequirks} ${extraargs} ${extraboardargs}" diff --git a/config/bootscripts/boot-rockchip.cmd b/config/bootscripts/boot-rockchip.cmd index 930ade76b..aaac3f5fc 100644 --- a/config/bootscripts/boot-rockchip.cmd +++ b/config/bootscripts/boot-rockchip.cmd @@ -10,6 +10,7 @@ setenv overlay_error "false" setenv rootdev "/dev/mmcblk0p1" setenv verbosity "1" setenv console "both" +setenv bootlogo "false" setenv rootfstype "ext4" setenv docker_optimizations "on" setenv earlycon "off" @@ -28,6 +29,7 @@ if test "${console}" = "ttyS2,115200n8"; then setenv console "both"; fi if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "console=ttyS2,115200n8 ${consoleargs}"; fi if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi # get PARTUUID of first partition on SD/eMMC the boot script was loaded from if test "${devtype}" = "mmc"; then part uuid mmc ${devnum}:1 partuuid; fi diff --git a/config/bootscripts/boot-rockchip64.cmd b/config/bootscripts/boot-rockchip64.cmd index 1b9658c73..85294ef40 100644 --- a/config/bootscripts/boot-rockchip64.cmd +++ b/config/bootscripts/boot-rockchip64.cmd @@ -9,6 +9,7 @@ setenv overlay_error "false" setenv rootdev "/dev/mmcblk0p1" setenv verbosity "1" setenv console "both" +setenv bootlogo "false" setenv rootfstype "ext4" setenv docker_optimizations "on" setenv earlycon "off" @@ -25,6 +26,7 @@ if test "${logo}" = "disabled"; then setenv logo "logo.nologo"; fi if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "console=ttyS2,1500000 ${consoleargs}"; fi if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi # get PARTUUID of first partition on SD/eMMC the boot script was loaded from if test "${devtype}" = "mmc"; then part uuid mmc ${devnum}:1 partuuid; fi diff --git a/config/bootscripts/boot-s5p6818.cmd b/config/bootscripts/boot-s5p6818.cmd index a2ee7affd..76822100e 100644 --- a/config/bootscripts/boot-s5p6818.cmd +++ b/config/bootscripts/boot-s5p6818.cmd @@ -7,6 +7,7 @@ setenv rootdev "/dev/mmcblk2p1" setenv rootfstype "ext4" setenv verbosity "1" +setenv bootlogo "false" setenv fdt_addr "0x48000000" setenv ramdisk_addr_r "0x49000000" setenv kernel_addr_r "0x4a000000" @@ -22,7 +23,9 @@ if ext4load mmc ${devnum}:1 ${kernel_addr_r} ${prefix}/armbianEnv.txt; then env import -t ${kernel_addr_r} ${filesize} fi -setenv bootargs "console=ttySAC0,115200n8 console=tty1 root=${rootdev} rootwait rootfstype=${rootfstype} loglevel=${verbosity} usb-storage.quirks=${usbstoragequirks} ${extraargs}" +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi + +setenv bootargs "console=ttySAC0,115200n8 console=tty1 ${consoleargs} root=${rootdev} rootwait rootfstype=${rootfstype} loglevel=${verbosity} usb-storage.quirks=${usbstoragequirks} ${extraargs}" if ext4load mmc ${devnum}:1 ${fdt_addr} 
${prefix}dtb/nexell/${fdtfile} || ext4load mmc 1:1 ${fdt_addr} ${prefix}dtb/nexell/s5p6818-nanopi3-rev07.dtb; then echo "Loading DTB"; fi ext4load mmc ${devnum}:1 ${ramdisk_addr_r} ${prefix}uInitrd diff --git a/config/bootscripts/boot-sun50i-next.cmd b/config/bootscripts/boot-sun50i-next.cmd index d0e2a04a2..c98681485 100644 --- a/config/bootscripts/boot-sun50i-next.cmd +++ b/config/bootscripts/boot-sun50i-next.cmd @@ -10,6 +10,7 @@ setenv verbosity "1" setenv rootfstype "ext4" setenv console "both" setenv docker_optimizations "on" +setenv bootlogo "false" # Print boot source itest.b *0x10028 == 0x00 && echo "U-boot loaded from SD" @@ -25,6 +26,7 @@ fi if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=ttyS0,115200 console=tty1"; fi if test "${console}" = "serial"; then setenv consoleargs "console=ttyS0,115200"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi # get PARTUUID of first partition on SD/eMMC it was loaded from # mmc 0 is always mapped to device u-boot (2016.09+) was loaded from diff --git a/config/bootscripts/boot-sunxi.cmd b/config/bootscripts/boot-sunxi.cmd index 9c4405150..7bded2614 100644 --- a/config/bootscripts/boot-sunxi.cmd +++ b/config/bootscripts/boot-sunxi.cmd @@ -12,6 +12,7 @@ setenv disp_mem_reserves "off" setenv disp_mode "1920x1080p60" setenv rootfstype "ext4" setenv docker_optimizations "on" +setenv bootlogo "false" setenv devnum "0" setenv rootdev "/dev/mmcblk${devnum}p1" setenv earlycon "off" @@ -36,11 +37,10 @@ if test -e ${devtype} ${devnum} ${prefix}armbianEnv.txt; then env import -t ${load_addr} ${filesize} fi -if test "${logo}" = "disabled"; then setenv logo "logo.nologo"; fi - if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=ttyS0,115200 console=tty1"; fi if test "${console}" = "serial"; then setenv consoleargs "console=ttyS0,115200"; fi if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootargs "root=${rootdev} rootwait rootfstype=${rootfstype} ${consoleargs} hdmi.audio=EDID:0 disp.screen0_output_mode=${disp_mode} consoleblank=0 loglevel=${verbosity} ubootpart=${partuuid} ubootsource=${devtype} usb-storage.quirks=${usbstoragequirks} ${extraargs} ${extraboardargs}" diff --git a/config/bootscripts/boot-udoo.cmd b/config/bootscripts/boot-udoo.cmd index a3450b340..b56fa2690 100644 --- a/config/bootscripts/boot-udoo.cmd +++ b/config/bootscripts/boot-udoo.cmd @@ -10,6 +10,7 @@ setenv ramdisk_addr "0x14800000" setenv rootdev "/dev/mmcblk0p1" setenv verbosity "1" setenv console "both" +setenv bootlogo "false" setenv disp_mode "1920x1080M60" setenv rootfstype "ext4" setenv earlycon "off" @@ -24,6 +25,7 @@ fi if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=ttymxc1,115200 console=tty1"; fi if test "${console}" = "serial"; then setenv consoleargs "console=ttymxc1,115200"; fi if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi setenv bootargs "root=${rootdev} rootfstype=${rootfstype} rootwait ${consoleargs} video=mxcfb0:dev=hdmi,${disp_mode},if=RGB24,bpp=32 rd.dm=0 rd.luks=0 rd.lvm=0 raid=noautodetect pci=nomsi ahci_imx.hotplug=1 
vt.global_cursor_default=0 loglevel=${verbosity} usb-storage.quirks=${usbstoragequirks} ${extraargs}" run findfdt diff --git a/config/kernel/linux-imx6-current.config b/config/kernel/linux-imx6-current.config index 61e12e734..1f472a3b5 100644 --- a/config/kernel/linux-imx6-current.config +++ b/config/kernel/linux-imx6-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.7.5 Kernel Configuration +# Linux/arm 5.7.6 Kernel Configuration # # @@ -3542,6 +3542,7 @@ CONFIG_WLCORE=m CONFIG_WLCORE_SPI=m CONFIG_WLCORE_SDIO=m CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_RTL8723DU=m CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m @@ -6322,6 +6323,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y diff --git a/config/kernel/linux-meson64-current.config b/config/kernel/linux-meson64-current.config index a363e3741..19b520be0 100644 --- a/config/kernel/linux-meson64-current.config +++ b/config/kernel/linux-meson64-current.config @@ -5723,11 +5723,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y # CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set CONFIG_BOOTSPLASH=y # end of Console display driver support - -CONFIG_LOGO=y -CONFIG_LOGO_LINUX_MONO=y -CONFIG_LOGO_LINUX_VGA16=y -CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_LOGO is not set # end of Graphics support CONFIG_SOUND=m diff --git a/config/kernel/linux-odroidxu4-current.config b/config/kernel/linux-odroidxu4-current.config index c18db70cc..4c5f6d3a6 100644 --- a/config/kernel/linux-odroidxu4-current.config +++ b/config/kernel/linux-odroidxu4-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm 5.4.35 Kernel Configuration +# Linux/arm 5.4.49 Kernel Configuration # # @@ -12,7 +12,6 @@ CONFIG_CLANG_VERSION=0 CONFIG_CC_CAN_LINK=y CONFIG_CC_HAS_ASM_GOTO=y CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y @@ -2357,6 +2356,7 @@ CONFIG_WLAN_VENDOR_TI=y # CONFIG_WL12XX is not set # CONFIG_WL18XX is not set # CONFIG_WLCORE is not set +CONFIG_RTL8723DU=m CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m @@ -3454,9 +3454,6 @@ CONFIG_MEDIA_SUPPORT=y # # Multimedia core support # - -CONFIG_STAGING_MEDIA=y - CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_ANALOG_TV_SUPPORT=y CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y @@ -3466,6 +3463,7 @@ CONFIG_MEDIA_CEC_SUPPORT=y # CONFIG_MEDIA_CEC_RC is not set CONFIG_MEDIA_CONTROLLER=y CONFIG_MEDIA_CONTROLLER_DVB=y +# CONFIG_MEDIA_CONTROLLER_REQUEST_API is not set CONFIG_VIDEO_DEV=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_V4L2=y @@ -3482,40 +3480,6 @@ CONFIG_DVB_CORE=y # CONFIG_DVB_MMAP is not set CONFIG_DVB_NET=y CONFIG_TTPCI_EEPROM=m -CONFIG_FB_TFT=m -CONFIG_FB_TFT_AGM1264K_FL=m -CONFIG_FB_TFT_BD663474=m -CONFIG_FB_TFT_HX8340BN=m -CONFIG_FB_TFT_HX8347D=m -CONFIG_FB_TFT_HX8353D=m -CONFIG_FB_TFT_HX8357D=m -CONFIG_FB_TFT_ILI9163=m -CONFIG_FB_TFT_ILI9320=m -CONFIG_FB_TFT_ILI9325=m -CONFIG_FB_TFT_ILI9340=m -CONFIG_FB_TFT_ILI9341=m -CONFIG_FB_TFT_ILI9481=m -CONFIG_FB_TFT_ILI9486=m -CONFIG_FB_TFT_PCD8544=m -CONFIG_FB_TFT_RA8875=m -CONFIG_FB_TFT_S6D02A1=m -CONFIG_FB_TFT_S6D1121=m -# CONFIG_FB_TFT_SH1106 is not set -CONFIG_FB_TFT_SSD1289=m -CONFIG_FB_TFT_SSD1305=m -CONFIG_FB_TFT_SSD1306=m -CONFIG_FB_TFT_SSD1331=m -CONFIG_FB_TFT_SSD1351=m -CONFIG_FB_TFT_ST7735R=m -CONFIG_FB_TFT_ST7789V=m -CONFIG_FB_TFT_TINYLCD=m -CONFIG_FB_TFT_TLS8204=m -CONFIG_FB_TFT_UC1611=m -CONFIG_FB_TFT_UC1701=m -CONFIG_FB_TFT_UPD161704=m -CONFIG_FB_TFT_WATTEROTT=m -CONFIG_FB_TFT_HKTFT35=m -CONFIG_FB_TFT_HKTFT32=m CONFIG_DVB_MAX_ADAPTERS=8 # CONFIG_DVB_DYNAMIC_MINORS is not set # CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set @@ -4213,6 +4177,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_SYS_FOPS=y CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=m # CONFIG_FB_MODE_HELPERS is not set # CONFIG_FB_TILEBLITTING is not set @@ -4273,6 +4238,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set # CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y @@ -5191,6 +5157,144 @@ CONFIG_VIRTIO_INPUT=m # CONFIG_GREYBUS is not set CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_COMEDI is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_R8188EU is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network 
Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set +# end of Active energy metering IC + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +CONFIG_STAGING_MEDIA=y + +# +# soc_camera sensor drivers +# + +# +# Android +# +# end of Android + +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_FB_TFT=m +CONFIG_FB_TFT_AGM1264K_FL=m +CONFIG_FB_TFT_BD663474=m +CONFIG_FB_TFT_HX8340BN=m +CONFIG_FB_TFT_HX8347D=m +CONFIG_FB_TFT_HX8353D=m +CONFIG_FB_TFT_HX8357D=m +CONFIG_FB_TFT_ILI9163=m +CONFIG_FB_TFT_ILI9320=m +CONFIG_FB_TFT_ILI9325=m +CONFIG_FB_TFT_ILI9340=m +CONFIG_FB_TFT_ILI9341=m +CONFIG_FB_TFT_ILI9481=m +CONFIG_FB_TFT_ILI9486=m +CONFIG_FB_TFT_PCD8544=m +CONFIG_FB_TFT_RA8875=m +CONFIG_FB_TFT_S6D02A1=m +CONFIG_FB_TFT_S6D1121=m +# CONFIG_FB_TFT_SH1106 is not set +CONFIG_FB_TFT_SSD1289=m +CONFIG_FB_TFT_SSD1305=m +CONFIG_FB_TFT_SSD1306=m +CONFIG_FB_TFT_SSD1331=m +CONFIG_FB_TFT_SSD1351=m +CONFIG_FB_TFT_ST7735R=m +CONFIG_FB_TFT_ST7789V=m +CONFIG_FB_TFT_TINYLCD=m +CONFIG_FB_TFT_TLS8204=m +CONFIG_FB_TFT_UC1611=m +CONFIG_FB_TFT_UC1701=m +CONFIG_FB_TFT_UPD161704=m +CONFIG_FB_TFT_WATTEROTT=m +CONFIG_FB_TFT_HKTFT35=m +CONFIG_FB_TFT_HKTFT32=m +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_UWB is not set +# CONFIG_EXFAT_FS is not set # CONFIG_GOLDFISH is not set CONFIG_MFD_CROS_EC=y CONFIG_CHROME_PLATFORMS=y diff --git a/config/kernel/linux-odroidxu4-dev.config b/config/kernel/linux-odroidxu4-dev.config index 04cb11ffa..7caffc714 100644 --- a/config/kernel/linux-odroidxu4-dev.config +++ b/config/kernel/linux-odroidxu4-dev.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.7.4 Kernel Configuration +# Linux/arm 5.7.6 Kernel Configuration # # @@ -2317,6 +2317,7 @@ CONFIG_WLAN_VENDOR_TI=y # CONFIG_WL12XX is not set # CONFIG_WL18XX is not set # CONFIG_WLCORE is not set +CONFIG_RTL8723DU=m CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m @@ -4227,8 +4228,9 @@ CONFIG_HDMI=y CONFIG_DUMMY_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y diff --git a/config/kernel/linux-odroidxu4-legacy.config b/config/kernel/linux-odroidxu4-legacy.config index 24f597816..84e16ec1e 100644 --- a/config/kernel/linux-odroidxu4-legacy.config +++ b/config/kernel/linux-odroidxu4-legacy.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm 4.14.176 Kernel Configuration +# Linux/arm 4.14.186 Kernel Configuration # CONFIG_ARM=y CONFIG_ARM_HAS_SG_CHAIN=y @@ -2140,7 +2140,6 @@ CONFIG_WLAN_VENDOR_TI=y # CONFIG_WL12XX is not set # CONFIG_WL18XX is not set # CONFIG_WLCORE is not set -CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m # CONFIG_RTL8821CU is not set diff --git a/config/kernel/linux-sunxi-current.config b/config/kernel/linux-sunxi-current.config index 779a2e9f6..07e603701 100644 --- a/config/kernel/linux-sunxi-current.config +++ b/config/kernel/linux-sunxi-current.config @@ -2631,6 +2631,7 @@ CONFIG_WLAN_VENDOR_TI=y # CONFIG_WL12XX is not set # CONFIG_WL18XX is not set # CONFIG_WLCORE is not set +CONFIG_RTL8723DU=m CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m @@ -4921,6 +4922,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y # CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y diff --git a/config/kernel/linux-sunxi64-current.config b/config/kernel/linux-sunxi64-current.config index 7a25d293d..26a65272b 100644 --- a/config/kernel/linux-sunxi64-current.config +++ b/config/kernel/linux-sunxi64-current.config @@ -2648,6 +2648,7 @@ CONFIG_RTW88=m # CONFIG_WLAN_VENDOR_RSI is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set +CONFIG_RTL8723DU=m CONFIG_RTL8723DS=m CONFIG_RTL8822BU=m CONFIG_RTL8188EU=m @@ -4896,6 +4897,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y # CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y diff --git a/config/sources/families/imx6.conf b/config/sources/families/imx6.conf index 330e96f46..129ab04fa 100644 --- a/config/sources/families/imx6.conf +++ b/config/sources/families/imx6.conf @@ -14,7 +14,7 @@ case $BOARD in BOOTBRANCH='tag:v2017.11' SERIALCON=ttymxc1 BOOTSCRIPT="boot-udoo.cmd:boot.cmd" - BOOTENV_FILE='udoo-default.txt' + BOOTENV_FILE='udoo.txt' UBOOT_USE_GCC='< 7.0' ;; @@ -24,7 +24,7 @@ case $BOARD in BOOTSOURCE='https://github.com/SolidRun/u-boot.git' BOOTBRANCH='branch:v2018.01-solidrun-imx6' BOOTSCRIPT='boot-cubox.cmd:boot.cmd' - BOOTENV_FILE='cubox-default.txt' + BOOTENV_FILE='cubox.txt' UBOOT_TARGET_MAP=';emmc;SPL:SPL.emmc u-boot.img:u-boot.img.emmc ;sdhc;SPL:SPL.sdhc u-boot.img:u-boot.img.sdhc ;sdhc;SPL:SPL.sata u-boot.img:u-boot.img.sata @@ -36,7 +36,7 @@ case $BOARD in BOOTBRANCH="branch:v2017.03" BOOTSCRIPT='boot-udoo.cmd:boot.cmd' - BOOTENV_FILE='cubox-default.txt' + BOOTENV_FILE='cubox.txt' ;; diff --git a/config/sources/families/imx7d.conf b/config/sources/families/imx7d.conf index ee639ee2b..1e390f674 100644 --- a/config/sources/families/imx7d.conf +++ b/config/sources/families/imx7d.conf @@ -3,7 +3,7 @@ BOOTDIR='u-boot-sabre' BOOTBRANCH='branch:imx_v2018.03_4.14.98_2.0.0_ga' BOOTCONFIG="mx7dsabresd_defconfig" #BOOTSCRIPT='boot-imx7d.cmd:boot.cmd' -BOOTENV_FILE='imx7d-default.txt' +BOOTENV_FILE='imx7d.txt' ARCH=armhf UBOOT_TARGET_MAP=";;u-boot-dtb.imx" diff --git a/config/sources/families/include/meson64_common.inc b/config/sources/families/include/meson64_common.inc index 0c571b4f2..942089202 100644 --- a/config/sources/families/include/meson64_common.inc +++ b/config/sources/families/include/meson64_common.inc @@ -1,6 +1,6 @@ UBOOT_TARGET_MAP="u-boot-dtb.img;;u-boot.bin.sd.bin:u-boot.bin u-boot-dtb.img" 
BOOTSCRIPT="boot-meson64.cmd:boot.cmd" -BOOTENV_FILE='meson64-next.txt' +BOOTENV_FILE='meson.txt' LINUXFAMILY=meson64 ARCH=arm64 SERIALCON=ttyAML0 diff --git a/config/sources/families/include/mvebu-clearfog.inc b/config/sources/families/include/mvebu-clearfog.inc index b9fd5def7..8250e5c82 100644 --- a/config/sources/families/include/mvebu-clearfog.inc +++ b/config/sources/families/include/mvebu-clearfog.inc @@ -8,7 +8,7 @@ case $BRANCH in BOOTBRANCH='branch:v2018.01-solidrun-a38x' BOOTDIR='u-boot-armada' BOOTPATCHDIR='u-boot-clearfog' - BOOTSCRIPT='boot-mvebu-next.cmd:boot.cmd' + BOOTSCRIPT='boot-mvebu.cmd:boot.cmd' UBOOT_TARGET_MAP=";sdhc;u-boot-spl-sdhc.kwb:u-boot.mmc ;mmc;u-boot-spl-mmc.kwb:u-boot.emmc diff --git a/config/sources/families/include/mvebu-helios4.inc b/config/sources/families/include/mvebu-helios4.inc index 917f9a619..3d99b4986 100644 --- a/config/sources/families/include/mvebu-helios4.inc +++ b/config/sources/families/include/mvebu-helios4.inc @@ -6,7 +6,7 @@ case $BRANCH in BOOTBRANCH='tag:v2019.04' BOOTDIR=$MAINLINE_UBOOT_DIR BOOTPATCHDIR='u-boot-helios4' - BOOTSCRIPT='boot-mvebu-next.cmd:boot.cmd' + BOOTSCRIPT='boot-mvebu.cmd:boot.cmd' UBOOT_TARGET_MAP=";sdhc;u-boot-spl.kwb:u-boot.mmc ;spi;u-boot-spl.kwb:u-boot.flash diff --git a/config/sources/families/include/rockchip64_common.inc b/config/sources/families/include/rockchip64_common.inc index c61e2b929..64b29f17c 100644 --- a/config/sources/families/include/rockchip64_common.inc +++ b/config/sources/families/include/rockchip64_common.inc @@ -2,7 +2,7 @@ ARCH=arm64 KERNEL_IMAGE_TYPE=Image OFFSET=16 BOOTSCRIPT='boot-rockchip64.cmd:boot.cmd' -BOOTENV_FILE='rockchip-default.txt' +BOOTENV_FILE='rockchip.txt' UBOOT_TARGET_MAP=";;idbloader.bin uboot.img trust.bin" BOOTDELAY=0 OVERLAY_PREFIX='rockchip' diff --git a/config/sources/families/include/sunxi64_common.inc b/config/sources/families/include/sunxi64_common.inc index a8b4554f1..7f736013a 100644 --- a/config/sources/families/include/sunxi64_common.inc +++ b/config/sources/families/include/sunxi64_common.inc @@ -2,6 +2,7 @@ ARCH=arm64 ATF_TARGET_MAP="PLAT=$ATF_PLAT DEBUG=1 bl31;;build/$ATF_PLAT/debug/bl31.bin" BOOTDELAY=1 BOOTPATCHDIR='u-boot-sunxi' +BOOTENV_FILE='sunxi.txt' UBOOT_TARGET_MAP=';;spl/sunxi-spl.bin u-boot.itb' BOOTSCRIPT='boot-sun50i-next.cmd:boot.cmd' LINUXFAMILY=sunxi64 diff --git a/config/sources/families/include/sunxi_common.inc b/config/sources/families/include/sunxi_common.inc index bec0165ab..f64d37106 100644 --- a/config/sources/families/include/sunxi_common.inc +++ b/config/sources/families/include/sunxi_common.inc @@ -3,7 +3,7 @@ BOOTDELAY=1 BOOTPATCHDIR='u-boot-sunxi' UBOOT_TARGET_MAP=';;u-boot-sunxi-with-spl.bin' BOOTSCRIPT="boot-sunxi.cmd:boot.cmd" -BOOTENV_FILE='sunxi-default.txt' +BOOTENV_FILE='sunxi.txt' LINUXFAMILY=sunxi UBOOT_FW_ENV='0x88000,0x20000' # /etc/fw_env.config offset and env size ASOUND_STATE='asound.state.sunxi-next' diff --git a/config/sources/families/meson-gxbb.conf b/config/sources/families/meson-gxbb.conf index ebe08c2a3..b4ac6dedb 100644 --- a/config/sources/families/meson-gxbb.conf +++ b/config/sources/families/meson-gxbb.conf @@ -1,7 +1,6 @@ source "${BASH_SOURCE%/*}/include/meson64_common.inc" if [[ $BOARD == odroidc2 ]]; then UBOOT_TARGET_MAP=";;$SRC/cache/sources/odroidc2-blobs/bl1.bin.hardkernel u-boot.bin" - BOOTENV_FILE='odroidc2-next.txt' fi uboot_custom_postprocess() diff --git a/config/sources/families/mt7623.conf b/config/sources/families/mt7623.conf index b207ff9d8..d157b5564 100644 --- 
a/config/sources/families/mt7623.conf +++ b/config/sources/families/mt7623.conf @@ -1,5 +1,5 @@ BOOTSCRIPT='boot-mt7623.cmd:boot.cmd' -BOOTENV_FILE='mt7623-default.txt' +BOOTENV_FILE='mt7623.txt' UBOOT_TARGET_MAP=";;$SRC/packages/blobs/mt7623n/BPI-R2-HEAD440-0k.img $SRC/packages/blobs/mt7623n/BPI-R2-HEAD1-512b.img $SRC/packages/blobs/mt7623n/BPI-R2-preloader-2k.img $SRC/packages/blobs/mt7623n/BPI-R2-EMMC-boot0-0K-0905.img u-boot.bin" BOOTPATCHDIR='u-boot-mt7623' ARCH=arm64 diff --git a/config/sources/families/mvebu.conf b/config/sources/families/mvebu.conf index 31330cb03..510bbea88 100644 --- a/config/sources/families/mvebu.conf +++ b/config/sources/families/mvebu.conf @@ -1,10 +1,10 @@ ARCH=armhf if [[ $BOARD == helios4 ]]; then source "${BASH_SOURCE%/*}/include/mvebu-helios4.inc" - BOOTENV_FILE='helios4-default.txt' + BOOTENV_FILE='helios4.txt' else source "${BASH_SOURCE%/*}/include/mvebu-clearfog.inc" - BOOTENV_FILE='clearfog-default.txt' + BOOTENV_FILE='clearfog.txt' fi case $BRANCH in @@ -23,7 +23,7 @@ case $BRANCH in dev) BOOTBRANCH='tag:v2019.04' BOOTPATCHDIR='u-boot-mvebu-dev' - BOOTSCRIPT='boot-mvebu-next.cmd:boot.cmd' + BOOTSCRIPT='boot-mvebu.cmd:boot.cmd' UBOOT_TARGET_MAP=";sdhc;u-boot-spl.kwb:u-boot.mmc ;spi;u-boot-spl.kwb:u-boot.flash diff --git a/config/sources/families/odroidxu4.conf b/config/sources/families/odroidxu4.conf index e39d3ef9b..f7110d642 100644 --- a/config/sources/families/odroidxu4.conf +++ b/config/sources/families/odroidxu4.conf @@ -1,8 +1,7 @@ ARCH=armhf BOOTSOURCE='https://github.com/hardkernel/u-boot.git' -BOOTSCRIPT='boot-odroid-xu4-default.ini:boot.ini' +BOOTSCRIPT='boot-odroid-xu4.ini:boot.ini' BOOTDIR='u-boot-odroidxu' -BOOTENV_FILE='odroidxu4-default.txt' BOOTBRANCH='branch:odroidxu4-v2017.05' UBOOT_TARGET_MAP=';;sd_fuse/bl1.bin.hardkernel sd_fuse/bl2.bin.hardkernel.720k_uboot u-boot-dtb.bin sd_fuse/tzsw.bin.hardkernel' UBOOT_USE_GCC='> 6.3' diff --git a/config/sources/families/rk322x.conf b/config/sources/families/rk322x.conf index 72e995468..c621aedc6 100644 --- a/config/sources/families/rk322x.conf +++ b/config/sources/families/rk322x.conf @@ -1,5 +1,5 @@ BOOTSCRIPT="boot-rk322x.cmd:boot.cmd" -BOOTENV_FILE='rk322x-default.txt' +BOOTENV_FILE='rk322x.txt' OVERLAY_PREFIX='rk322x' UBOOT_TARGET_MAP="all u-boot.itb;;u-boot-rk322x-with-spl.bin" UBOOT_USE_GCC='< 9.0' diff --git a/config/sources/families/rockchip.conf b/config/sources/families/rockchip.conf index 682c01b71..6c84f5098 100644 --- a/config/sources/families/rockchip.conf +++ b/config/sources/families/rockchip.conf @@ -1,6 +1,6 @@ ARCH=armhf BOOTSCRIPT="boot-rockchip.cmd:boot.cmd" -BOOTENV_FILE='rockchip-default.txt' +BOOTENV_FILE='rockchip.txt' OVERLAY_PREFIX='rockchip' UBOOT_TARGET_MAP=";;$SRC/packages/blobs/rockchip/rk3288_boot.bin u-boot-rockchip-with-spl.bin" UBOOT_USE_GCC='< 8.0' diff --git a/config/sources/families/s5p6818.conf b/config/sources/families/s5p6818.conf index f55617445..a389883e1 100644 --- a/config/sources/families/s5p6818.conf +++ b/config/sources/families/s5p6818.conf @@ -3,7 +3,7 @@ BOOTSOURCE='https://github.com/rafaello7/u-boot-nanopi-m3' BOOTBRANCH='branch:master' BOOTDIR='u-boot-s5p6818' BOOTSCRIPT='boot-s5p6818.cmd:boot.cmd' -BOOTENV_FILE='s5p6818-default.txt' +BOOTENV_FILE='s5p6818.txt' UBOOT_TARGET_MAP=";;boot.img bootemmc.img" ATF_COMPILE="no" case $BRANCH in diff --git a/config/sources/families/sun50iw1.conf b/config/sources/families/sun50iw1.conf index 396aa9bd8..701eea1c8 100644 --- a/config/sources/families/sun50iw1.conf +++ 
b/config/sources/families/sun50iw1.conf @@ -1,7 +1,6 @@ ATF_PLAT="sun50i_a64" source "${BASH_SOURCE%/*}/include/sunxi64_common.inc" OVERLAY_PREFIX='sun50i-a64' -BOOTENV_FILE='sun50iw1-next.txt' [[ -z $CPUMIN ]] && CPUMIN=480000 [[ -z $CPUMAX ]] && CPUMAX=1010000 GOVERNOR=performance diff --git a/config/sources/families/sun50iw2.conf b/config/sources/families/sun50iw2.conf index 9bf4ae2be..515f4bf66 100644 --- a/config/sources/families/sun50iw2.conf +++ b/config/sources/families/sun50iw2.conf @@ -1,12 +1,7 @@ ATF_PLAT="sun50i_a64" - source "${BASH_SOURCE%/*}/include/sunxi64_common.inc" - OVERLAY_PREFIX='sun50i-h5' -BOOTENV_FILE='sun50iw2-next.txt' - [[ -z $CPUMIN ]] && CPUMIN=480000 [[ -z $CPUMAX ]] && CPUMAX=1368000 GOVERNOR=ondemand - ASOUND_STATE='asound.state.sun50iw2-dev' diff --git a/config/sources/families/sun50iw6.conf b/config/sources/families/sun50iw6.conf index 8018a35af..e271065d9 100644 --- a/config/sources/families/sun50iw6.conf +++ b/config/sources/families/sun50iw6.conf @@ -1,12 +1,7 @@ ATF_PLAT="sun50i_h6"; - source "${BASH_SOURCE%/*}/include/sunxi64_common.inc" - OVERLAY_PREFIX='sun50i-h6' -BOOTENV_FILE='sun50iw2-next.txt' - [[ -z $CPUMIN ]] && CPUMIN=480000 [[ -z $CPUMAX ]] && CPUMAX=1810000 GOVERNOR=ondemand - ASOUND_STATE='asound.state.sun50iw2-dev' diff --git a/config/sources/families/sun7i.conf b/config/sources/families/sun7i.conf index 9ae6142e1..1986d076b 100644 --- a/config/sources/families/sun7i.conf +++ b/config/sources/families/sun7i.conf @@ -1,6 +1,5 @@ source "${BASH_SOURCE%/*}/include/sunxi_common.inc" OVERLAY_PREFIX='sun7i-a20' - [[ -z $CPUMIN ]] && CPUMIN=480000 [[ -z $CPUMAX ]] && CPUMAX=1010000 diff --git a/config/templates/Dockerfile b/config/templates/Dockerfile index 246a7811c..7cc3db544 100644 --- a/config/templates/Dockerfile +++ b/config/templates/Dockerfile @@ -12,7 +12,7 @@ RUN apt update && DEBIAN_FRONTEND=noninteractive apt -y upgrade && \ pv bc lzop zip binfmt-support build-essential ccache debootstrap ntpdate gawk gcc-arm-linux-gnueabihf \ qemu-user-static u-boot-tools uuid-dev zlib1g-dev unzip libusb-1.0-0-dev parted pkg-config libncurses5-dev whiptail \ debian-keyring debian-archive-keyring f2fs-tools libfile-fcntllock-perl rsync libssl-dev nfs-kernel-server btrfs-progs \ - ncurses-term p7zip-full kmod dosfstools libc6-dev-armhf-cross fakeroot xxd \ + ncurses-term p7zip-full kmod dosfstools libc6-dev-armhf-cross fakeroot xxd imagemagick \ curl patchutils python2 python3 liblz4-tool libpython2.7-dev linux-base swig libpython3-dev python3-dev \ systemd-container udev g++-8-arm-linux-gnueabihf lib32stdc++6 cpio tzdata psmisc acl \ libc6-i386 lib32ncurses6 lib32tinfo6 locales ncurses-base zlib1g:i386 pixz bison libbison-dev flex libfl-dev \ diff --git a/lib/build-all-ng.sh b/lib/build-all-ng.sh index 443133bbd..2e0c69d1f 100644 --- a/lib/build-all-ng.sh +++ b/lib/build-all-ng.sh @@ -59,7 +59,7 @@ unset LINUXFAMILY LINUXCONFIG KERNELDIR KERNELSOURCE KERNELBRANCH BOOTDIR BOOTSO DEB_STORAGE REPO_STORAGE REPO_CONFIG REPOSITORY_UPDATE PACKAGE_LIST_RELEASE LOCAL_MIRROR COMPILE_ATF \ PACKAGE_LIST_DESKTOP_BOARD PACKAGE_LIST_DESKTOP_FAMILY ATF_COMPILE ATFPATCHDIR OFFSET BOOTSOURCEDIR BOOT_USE_BLOBS \ BOOT_SOC DDR_BLOB MINILOADER_BLOB BL31_BLOB BOOT_RK3328_USE_AYUFAN_ATF BOOT_USE_BLOBS BOOT_RK3399_LEGACY_HYBRID \ - BOOT_USE_MAINLINE_ATF BOOT_USE_TPL_SPL_BLOB OFFLINE_WORK IMAGE_PARTITION_TABLE + BOOT_USE_MAINLINE_ATF BOOT_USE_TPL_SPL_BLOB OFFLINE_WORK IMAGE_PARTITION_TABLE BOOT_LOGO } pack_upload () diff --git a/lib/compilation-prepare.sh 
b/lib/compilation-prepare.sh index 7802d4236..c5e4b66af 100644 --- a/lib/compilation-prepare.sh +++ b/lib/compilation-prepare.sh @@ -71,6 +71,28 @@ compilation_prepare() process_patch_file "${SRC}/patch/misc/general-packaging-4.9.y.patch" "applying" fi + # + # Linux splah file + # + + if linux-version compare "${version}" ge 5.4; then + + display_alert "Adding" "Kernel splash file" "info" + process_patch_file "${SRC}/patch/misc/0001-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0002-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0003-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0004-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0005-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0006-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0007-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0008-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0009-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0010-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0011-bootsplash.patch" "applying" + process_patch_file "${SRC}/patch/misc/0012-bootsplash.patch" "applying" + + fi + # # mac80211 wireless driver injection features from Kali Linux # diff --git a/lib/distributions.sh b/lib/distributions.sh index 0dce6453f..617cb88d3 100644 --- a/lib/distributions.sh +++ b/lib/distributions.sh @@ -298,7 +298,6 @@ install_common() # copy boot splash images cp "${SRC}"/packages/blobs/splash/armbian-u-boot.bmp "${SDCARD}"/boot/boot.bmp - cp "${SRC}"/packages/blobs/splash/armbian-desktop.png "${SDCARD}"/boot/boot-desktop.png # execute $LINUXFAMILY-specific tweaks [[ $(type -t family_tweaks) == function ]] && family_tweaks @@ -427,6 +426,8 @@ install_common() # nsswitch settings for sane DNS behavior: remove resolve, assure libnss-myhostname support sed "s/hosts\:.*/hosts: files mymachines dns myhostname/g" -i "${SDCARD}"/etc/nsswitch.conf + [[ $BOOT_LOGO == yes || $BOOT_LOGO == desktop && $BUILD_DESKTOP == yes ]] && boot_logo + } diff --git a/lib/general.sh b/lib/general.sh index 5e421b20d..6df464412 100644 --- a/lib/general.sh +++ b/lib/general.sh @@ -481,6 +481,125 @@ fingerprint_image() } +#-------------------------------------------------------------------------------------------------------------------------------- +# Create kernel boot logo from packages/blobs/splash/logo.png and packages/blobs/splash/spinner.gif (animated) +# and place to the file /lib/firmware/bootsplash +#-------------------------------------------------------------------------------------------------------------------------------- +function boot_logo () +{ +display_alert "Building kernel splash logo" "$RELEASE" "info" + + LOGO=${SRC}/packages/blobs/splash/logo.png + LOGO_WIDTH=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 1) + LOGO_HEIGHT=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 2) + THROBBER=${SRC}/packages/blobs/splash/spinner.gif + THROBBER_WIDTH=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 1) + THROBBER_HEIGHT=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 2) + convert -alpha remove -background "#000000" $LOGO "${SDCARD}"/tmp/logo.rgb + convert -alpha remove -background "#000000" $THROBBER "${SDCARD}"/tmp/throbber%02d.rgb + ${SRC}/packages/blobs/splash/bootsplash-packer \ + --bg_red 0x00 \ + --bg_green 0x00 \ + --bg_blue 0x00 \ + --frame_ms 48 \ + --picture \ + --pic_width $LOGO_WIDTH \ 
+ --pic_height $LOGO_HEIGHT \ + --pic_position 0 \ + --blob "${SDCARD}"/tmp/logo.rgb \ + --picture \ + --pic_width $THROBBER_WIDTH \ + --pic_height $THROBBER_HEIGHT \ + --pic_position 0x05 \ + --pic_position_offset 200 \ + --pic_anim_type 1 \ + --pic_anim_loop 0 \ + --blob "${SDCARD}"/tmp/throbber00.rgb \ + --blob "${SDCARD}"/tmp/throbber01.rgb \ + --blob "${SDCARD}"/tmp/throbber02.rgb \ + --blob "${SDCARD}"/tmp/throbber03.rgb \ + --blob "${SDCARD}"/tmp/throbber04.rgb \ + --blob "${SDCARD}"/tmp/throbber05.rgb \ + --blob "${SDCARD}"/tmp/throbber06.rgb \ + --blob "${SDCARD}"/tmp/throbber07.rgb \ + --blob "${SDCARD}"/tmp/throbber08.rgb \ + --blob "${SDCARD}"/tmp/throbber09.rgb \ + --blob "${SDCARD}"/tmp/throbber10.rgb \ + --blob "${SDCARD}"/tmp/throbber11.rgb \ + --blob "${SDCARD}"/tmp/throbber12.rgb \ + --blob "${SDCARD}"/tmp/throbber13.rgb \ + --blob "${SDCARD}"/tmp/throbber14.rgb \ + --blob "${SDCARD}"/tmp/throbber15.rgb \ + --blob "${SDCARD}"/tmp/throbber16.rgb \ + --blob "${SDCARD}"/tmp/throbber17.rgb \ + --blob "${SDCARD}"/tmp/throbber18.rgb \ + --blob "${SDCARD}"/tmp/throbber19.rgb \ + --blob "${SDCARD}"/tmp/throbber20.rgb \ + --blob "${SDCARD}"/tmp/throbber21.rgb \ + --blob "${SDCARD}"/tmp/throbber22.rgb \ + --blob "${SDCARD}"/tmp/throbber23.rgb \ + --blob "${SDCARD}"/tmp/throbber24.rgb \ + --blob "${SDCARD}"/tmp/throbber25.rgb \ + --blob "${SDCARD}"/tmp/throbber26.rgb \ + --blob "${SDCARD}"/tmp/throbber27.rgb \ + --blob "${SDCARD}"/tmp/throbber28.rgb \ + --blob "${SDCARD}"/tmp/throbber29.rgb \ + --blob "${SDCARD}"/tmp/throbber30.rgb \ + --blob "${SDCARD}"/tmp/throbber31.rgb \ + --blob "${SDCARD}"/tmp/throbber32.rgb \ + --blob "${SDCARD}"/tmp/throbber33.rgb \ + --blob "${SDCARD}"/tmp/throbber34.rgb \ + --blob "${SDCARD}"/tmp/throbber35.rgb \ + --blob "${SDCARD}"/tmp/throbber36.rgb \ + --blob "${SDCARD}"/tmp/throbber37.rgb \ + --blob "${SDCARD}"/tmp/throbber38.rgb \ + --blob "${SDCARD}"/tmp/throbber39.rgb \ + --blob "${SDCARD}"/tmp/throbber40.rgb \ + --blob "${SDCARD}"/tmp/throbber41.rgb \ + --blob "${SDCARD}"/tmp/throbber42.rgb \ + --blob "${SDCARD}"/tmp/throbber43.rgb \ + --blob "${SDCARD}"/tmp/throbber44.rgb \ + --blob "${SDCARD}"/tmp/throbber45.rgb \ + --blob "${SDCARD}"/tmp/throbber46.rgb \ + --blob "${SDCARD}"/tmp/throbber47.rgb \ + --blob "${SDCARD}"/tmp/throbber48.rgb \ + --blob "${SDCARD}"/tmp/throbber49.rgb \ + --blob "${SDCARD}"/tmp/throbber50.rgb \ + --blob "${SDCARD}"/tmp/throbber51.rgb \ + --blob "${SDCARD}"/tmp/throbber52.rgb \ + --blob "${SDCARD}"/tmp/throbber53.rgb \ + --blob "${SDCARD}"/tmp/throbber54.rgb \ + --blob "${SDCARD}"/tmp/throbber55.rgb \ + --blob "${SDCARD}"/tmp/throbber56.rgb \ + --blob "${SDCARD}"/tmp/throbber57.rgb \ + --blob "${SDCARD}"/tmp/throbber58.rgb \ + --blob "${SDCARD}"/tmp/throbber59.rgb \ + --blob "${SDCARD}"/tmp/throbber60.rgb \ + --blob "${SDCARD}"/tmp/throbber61.rgb \ + --blob "${SDCARD}"/tmp/throbber62.rgb \ + --blob "${SDCARD}"/tmp/throbber63.rgb \ + --blob "${SDCARD}"/tmp/throbber64.rgb \ + --blob "${SDCARD}"/tmp/throbber65.rgb \ + --blob "${SDCARD}"/tmp/throbber66.rgb \ + --blob "${SDCARD}"/tmp/throbber67.rgb \ + --blob "${SDCARD}"/tmp/throbber68.rgb \ + --blob "${SDCARD}"/tmp/throbber69.rgb \ + --blob "${SDCARD}"/tmp/throbber70.rgb \ + --blob "${SDCARD}"/tmp/throbber71.rgb \ + --blob "${SDCARD}"/tmp/throbber72.rgb \ + --blob "${SDCARD}"/tmp/throbber73.rgb \ + --blob "${SDCARD}"/tmp/throbber74.rgb \ + "${SDCARD}"/lib/firmware/bootsplash.armbian >/dev/null 2>&1 + [[ -f "${SDCARD}"/boot/armbianEnv.txt ]] && grep -q '^bootlogo' 
"${SDCARD}"/boot/armbianEnv.txt && \ + sed -i 's/^bootlogo.*/bootlogo=true/' "${SDCARD}"/boot/armbianEnv.txt || echo 'bootlogo=true' >> "${SDCARD}"/boot/armbianEnv.txt + [[ -f "${SDCARD}"/boot/boot.ini ]] && sed -i 's/^setenv bootlogo.*/setenv bootlogo "true"/' "${SDCARD}"/boot/boot.ini + + # enable additional services + chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-ask-password-console.path >/dev/null 2>&1" + chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-hide-when-booted.service >/dev/null 2>&1" + chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-show-on-shutdown.service >/dev/null 2>&1" +} + function distro_menu () @@ -856,7 +975,7 @@ prepare_host() local hostdeps="wget ca-certificates device-tree-compiler pv bc lzop zip binfmt-support build-essential ccache debootstrap ntpdate \ gawk gcc-arm-linux-gnueabihf qemu-user-static u-boot-tools uuid-dev zlib1g-dev unzip libusb-1.0-0-dev fakeroot \ parted pkg-config libncurses5-dev whiptail debian-keyring debian-archive-keyring f2fs-tools libfile-fcntllock-perl rsync libssl-dev \ - nfs-kernel-server btrfs-progs ncurses-term p7zip-full kmod dosfstools libc6-dev-armhf-cross \ + nfs-kernel-server btrfs-progs ncurses-term p7zip-full kmod dosfstools libc6-dev-armhf-cross imagemagick \ curl patchutils liblz4-tool libpython2.7-dev linux-base swig aptly acl python3-dev python3-distutils \ locales ncurses-base pixz dialog systemd-container udev lib32stdc++6 libc6-i386 lib32ncurses5 lib32tinfo5 \ bison libbison-dev flex libfl-dev cryptsetup gpg gnupg1 cpio aria2 pigz dirmngr python3-distutils" @@ -1089,7 +1208,7 @@ download_and_verify() local remotedir=$1 local filename=$2 - local localdir="${SRC}"/cache/${remotedir//_} + local localdir=$SRC/cache/${remotedir//_} local dirname=${filename//.tar.xz} if [[ $DOWNLOAD_MIRROR == china ]]; then diff --git a/packages/blobs/splash/armbian-desktop.png b/packages/blobs/splash/armbian-desktop.png deleted file mode 100644 index 7ebabd355..000000000 Binary files a/packages/blobs/splash/armbian-desktop.png and /dev/null differ diff --git a/packages/blobs/splash/bootsplash-packer b/packages/blobs/splash/bootsplash-packer new file mode 100755 index 000000000..32dc58496 Binary files /dev/null and b/packages/blobs/splash/bootsplash-packer differ diff --git a/packages/blobs/splash/logo.png b/packages/blobs/splash/logo.png new file mode 100644 index 000000000..23051d78c Binary files /dev/null and b/packages/blobs/splash/logo.png differ diff --git a/packages/blobs/splash/spinner.gif b/packages/blobs/splash/spinner.gif new file mode 100644 index 000000000..0084e5024 Binary files /dev/null and b/packages/blobs/splash/spinner.gif differ diff --git a/packages/bsp/common/etc/initramfs-tools/hooks/bootsplash.sh b/packages/bsp/common/etc/initramfs-tools/hooks/bootsplash.sh new file mode 100755 index 000000000..4471e0bd7 --- /dev/null +++ b/packages/bsp/common/etc/initramfs-tools/hooks/bootsplash.sh @@ -0,0 +1,11 @@ +#!/bin/sh +# Copy splash file to initrd +# +mkdir -p "${DESTDIR}"/lib/firmware +splashfile=/lib/firmware/bootsplash.armbian + +if [ -f "${splashfile}" ]; then + cp "${splashfile}" "${DESTDIR}"/lib/firmware +fi + +exit 0 diff --git a/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.path b/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.path new file mode 100644 index 000000000..b49226de1 --- /dev/null +++ b/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.path @@ -0,0 +1,17 @@ 
+[Unit] +Description=Dispatch Password Requests to Console Directory Watch when bootsplash is active +DefaultDependencies=no +Conflicts=shutdown.target +Conflicts=systemd-ask-password-console.path +Conflicts=systemd-ask-password-console.service +After=plymouth-start.service +Before=paths.target shutdown.target cryptsetup.target +ConditionPathExists=!/run/plymouth/pid +ConditionPathExists=/sys/devices/platform/bootsplash.0/enabled + +[Path] +DirectoryNotEmpty=/run/systemd/ask-password +MakeDirectory=yes + +[Install] +WantedBy=sysinit.target diff --git a/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.service b/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.service new file mode 100644 index 000000000..4f625c04b --- /dev/null +++ b/packages/bsp/common/lib/systemd/system/bootsplash-ask-password-console.service @@ -0,0 +1,16 @@ +[Unit] +Description=Dispatch Password Requests to Console when bootsplash is active +DefaultDependencies=no +Conflicts=shutdown.target +Conflicts=systemd-ask-password-console.path +Conflicts=systemd-ask-password-console.service +After=plymouth-start.service systemd-vconsole-setup.service +Before=shutdown.target +ConditionPathExists=!/run/plymouth/pid +ConditionPathExists=/sys/devices/platform/bootsplash.0/enabled + +[Service] +ExecStartPre=-/bin/sh -c '/bin/cat /sys/devices/platform/bootsplash.0/enabled > /run/systemd/ask-password/bootsplash-state ; exec /bin/echo off > /sys/devices/platform/bootsplash.0/enabled' +ExecStart=/usr/bin/systemd-tty-ask-password-agent --query --console +ExecStartPost=-/bin/sh -c 'exec /bin/cat /run/systemd/ask-password/bootsplash-state > /sys/devices/platform/bootsplash.0/enabled ; /bin/rm /run/systemd/ask-password/bootsplash-state' +Type=oneshot diff --git a/packages/bsp/common/lib/systemd/system/bootsplash-hide-when-booted.service b/packages/bsp/common/lib/systemd/system/bootsplash-hide-when-booted.service new file mode 100644 index 000000000..1f72dd4c3 --- /dev/null +++ b/packages/bsp/common/lib/systemd/system/bootsplash-hide-when-booted.service @@ -0,0 +1,14 @@ +[Unit] +Description=Hide bootsplash after starting the system +After=graphical.target +DefaultDependencies=no +ConditionPathExists=/sys/devices/platform/bootsplash.0/enabled + +[Service] +ExecStartPre=/bin/sh -c 'exec /bin/sleep 15' +ExecStart=/bin/sh -c 'exec /bin/echo off > /sys/devices/platform/bootsplash.0/enabled' +Restart=no +Type=oneshot + +[Install] +WantedBy=graphical.target diff --git a/packages/bsp/common/lib/systemd/system/bootsplash-show-on-shutdown.service b/packages/bsp/common/lib/systemd/system/bootsplash-show-on-shutdown.service new file mode 100644 index 000000000..56a00a40b --- /dev/null +++ b/packages/bsp/common/lib/systemd/system/bootsplash-show-on-shutdown.service @@ -0,0 +1,19 @@ +[Unit] +Description=Show bootsplash on system halt, power off, reboot, or kexec +Before=systemd-halt.service +Before=systemd-kexec.service +Before=systemd-poweroff.service +Before=systemd-reboot.service +DefaultDependencies=no +ConditionPathExists=/sys/devices/platform/bootsplash.0/enabled + +[Service] +ExecStart=/bin/sh -c 'exec /bin/echo on > /sys/devices/platform/bootsplash.0/enabled' +Restart=no +Type=oneshot + +[Install] +WantedBy=halt.target +WantedBy=kexec.target +WantedBy=poweroff.target +WantedBy=reboot.target diff --git a/patch/kernel/meson64-current/0087-WIP-arm64-dts-meson-khadas-vim3-change-LED-behaviour.patch b/patch/kernel/meson64-current/0087-WIP-arm64-dts-meson-khadas-vim3-change-LED-behaviour.patch index 
7ca6773b7..2852d4346 100644 --- a/patch/kernel/meson64-current/0087-WIP-arm64-dts-meson-khadas-vim3-change-LED-behaviour.patch +++ b/patch/kernel/meson64-current/0087-WIP-arm64-dts-meson-khadas-vim3-change-LED-behaviour.patch @@ -24,7 +24,7 @@ index 6022805d2032..6d0163f56b0d 100644 @@ -40,14 +40,14 @@ compatible = "gpio-leds"; - white { + led-white { - label = "vim3:white:sys"; - gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>; - linux,default-trigger = "heartbeat"; @@ -33,7 +33,7 @@ index 6022805d2032..6d0163f56b0d 100644 + linux,default-trigger = "default-on"; }; - red { + led-red { label = "vim3:red"; - gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>; + gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>; diff --git a/patch/kernel/odroidc4-legacy/patch-4.9.224-225.patch b/patch/kernel/odroidc4-legacy/patch-4.9.224-225.patch new file mode 100644 index 000000000..6134f53cf --- /dev/null +++ b/patch/kernel/odroidc4-legacy/patch-4.9.224-225.patch @@ -0,0 +1,3057 @@ +diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt +index 4650a00ed012..9bc271cdc9a8 100644 +--- a/Documentation/networking/l2tp.txt ++++ b/Documentation/networking/l2tp.txt +@@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask. + + The following debug mask bits are available: + +-PPPOL2TP_MSG_DEBUG verbose debug (if compiled in) +-PPPOL2TP_MSG_CONTROL userspace - kernel interface +-PPPOL2TP_MSG_SEQ sequence numbers handling +-PPPOL2TP_MSG_DATA data packets ++L2TP_MSG_DEBUG verbose debug (if compiled in) ++L2TP_MSG_CONTROL userspace - kernel interface ++L2TP_MSG_SEQ sequence numbers handling ++L2TP_MSG_DATA data packets + + If enabled, files under a l2tp debugfs directory can be used to dump + kernel state about L2TP tunnels and sessions. To access it, the +diff --git a/Makefile b/Makefile +index 3e58c142f92f..d17a2ad3cc4d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 224 ++SUBLEVEL = 225 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h +index cc414382dab4..561b2ba6bc28 100644 +--- a/arch/arm/include/asm/futex.h ++++ b/arch/arm/include/asm/futex.h +@@ -162,8 +162,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + preempt_enable(); + #endif + +- if (!ret) +- *oval = oldval; ++ /* ++ * Store unconditionally. If ret != 0 the extra store is the least ++ * of the worries but GCC cannot figure out that __futex_atomic_op() ++ * is either setting ret to -EFAULT or storing the old value in ++ * oldval which results in a uninitialized warning at the call site. ++ */ ++ *oval = oldval; + + return ret; + } +diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c +index bc96c8a7fc79..3e4b778f16a5 100644 +--- a/arch/arm64/kernel/machine_kexec.c ++++ b/arch/arm64/kernel/machine_kexec.c +@@ -177,7 +177,8 @@ void machine_kexec(struct kimage *kimage) + /* Flush the reboot_code_buffer in preparation for its execution. */ + __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size); + flush_icache_range((uintptr_t)reboot_code_buffer, +- arm64_relocate_new_kernel_size); ++ (uintptr_t)reboot_code_buffer + ++ arm64_relocate_new_kernel_size); + + /* Flush the kimage list and its buffers. 
*/ + kexec_list_flush(kimage); +diff --git a/drivers/base/component.c b/drivers/base/component.c +index 08da6160e94d..55f0856bd9b5 100644 +--- a/drivers/base/component.c ++++ b/drivers/base/component.c +@@ -162,7 +162,8 @@ static int try_to_bring_up_master(struct master *master, + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); +- dev_info(master->dev, "master bind failed: %d\n", ret); ++ if (ret != -EPROBE_DEFER) ++ dev_info(master->dev, "master bind failed: %d\n", ret); + return ret; + } + +@@ -431,8 +432,9 @@ static int component_bind(struct component *component, struct master *master, + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + +- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", +- dev_name(component->dev), component->ops, ret); ++ if (ret != -EPROBE_DEFER) ++ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", ++ dev_name(component->dev), component->ops, ret); + } + + return ret; +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c +index 2d4aeba579f7..c16c06b3dd2f 100644 +--- a/drivers/dma/tegra210-adma.c ++++ b/drivers/dma/tegra210-adma.c +@@ -793,7 +793,7 @@ static int tegra_adma_probe(struct platform_device *pdev) + ret = dma_async_device_register(&tdma->dma_dev); + if (ret < 0) { + dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); +- goto irq_dispose; ++ goto rpm_put; + } + + ret = of_dma_controller_register(pdev->dev.of_node, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 25c006338100..4630b58634d8 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -353,6 +353,7 @@ + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 + + #define USB_VENDOR_ID_ELAN 0x04f3 + +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index fba655d639af..1207102823de 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1332,6 +1332,9 @@ static const struct hid_device_id mt_devices[] = { + { .driver_data = MT_CLS_EGALAX_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, ++ { .driver_data = MT_CLS_EGALAX, ++ MT_USB_DEVICE(USB_VENDOR_ID_DWAV, ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, + + /* Elitegroup panel */ + { .driver_data = MT_CLS_SERIAL, +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index eaa312bc3a3c..c4066276eb7b 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -47,7 +47,7 @@ + struct i2c_dev { + struct list_head list; + struct i2c_adapter *adap; +- struct device *dev; ++ struct device dev; + struct cdev cdev; + }; + +@@ -91,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) + return i2c_dev; + } + +-static void put_i2c_dev(struct i2c_dev *i2c_dev) ++static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) + { + spin_lock(&i2c_dev_list_lock); + list_del(&i2c_dev->list); + spin_unlock(&i2c_dev_list_lock); +- kfree(i2c_dev); ++ if (del_cdev) ++ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); ++ put_device(&i2c_dev->dev); + } + + static ssize_t name_show(struct device *dev, +@@ -542,6 +544,14 @@ static const struct file_operations i2cdev_fops = { + + static struct class *i2c_dev_class; + ++static void i2cdev_dev_release(struct device *dev) ++{ ++ struct i2c_dev *i2c_dev; ++ ++ 
i2c_dev = container_of(dev, struct i2c_dev, dev); ++ kfree(i2c_dev); ++} ++ + static int i2cdev_attach_adapter(struct device *dev, void *dummy) + { + struct i2c_adapter *adap; +@@ -558,27 +568,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) + + cdev_init(&i2c_dev->cdev, &i2cdev_fops); + i2c_dev->cdev.owner = THIS_MODULE; +- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); +- if (res) +- goto error_cdev; +- +- /* register this i2c device with the driver core */ +- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, +- MKDEV(I2C_MAJOR, adap->nr), NULL, +- "i2c-%d", adap->nr); +- if (IS_ERR(i2c_dev->dev)) { +- res = PTR_ERR(i2c_dev->dev); +- goto error; ++ ++ device_initialize(&i2c_dev->dev); ++ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); ++ i2c_dev->dev.class = i2c_dev_class; ++ i2c_dev->dev.parent = &adap->dev; ++ i2c_dev->dev.release = i2cdev_dev_release; ++ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); ++ ++ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); ++ if (res) { ++ put_i2c_dev(i2c_dev, false); ++ return res; + } + + pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", + adap->name, adap->nr); + return 0; +-error: +- cdev_del(&i2c_dev->cdev); +-error_cdev: +- put_i2c_dev(i2c_dev); +- return res; + } + + static int i2cdev_detach_adapter(struct device *dev, void *dummy) +@@ -594,9 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) + if (!i2c_dev) /* attach_adapter must have failed */ + return 0; + +- cdev_del(&i2c_dev->cdev); +- put_i2c_dev(i2c_dev); +- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); ++ put_i2c_dev(i2c_dev, true); + + pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); + return 0; +diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c +index 3e6fe1760d82..a86c511c29e0 100644 +--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c ++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c +@@ -270,6 +270,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) + err_rollback_available: + device_remove_file(&pdev->dev, &dev_attr_available_masters); + err_rollback: ++ i2c_demux_deactivate_master(priv); + for (j = 0; j < i; j++) { + of_node_put(priv->chan[j].parent_np); + of_changeset_destroy(&priv->chan[j].chgset); +diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c +index c4ec7779b394..190a7c1c5604 100644 +--- a/drivers/iio/dac/vf610_dac.c ++++ b/drivers/iio/dac/vf610_dac.c +@@ -235,6 +235,7 @@ static int vf610_dac_probe(struct platform_device *pdev) + return 0; + + error_iio_device_register: ++ vf610_dac_exit(info); + clk_disable_unprepare(info->clk); + + return ret; +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index e6ae8d123984..a3279f303b49 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -1171,8 +1171,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + } + case IVHD_DEV_ACPI_HID: { + u16 devid; +- u8 hid[ACPIHID_HID_LEN] = {0}; +- u8 uid[ACPIHID_UID_LEN] = {0}; ++ u8 hid[ACPIHID_HID_LEN]; ++ u8 uid[ACPIHID_UID_LEN]; + int ret; + + if (h->type != 0x40) { +@@ -1189,6 +1189,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + } + ++ uid[0] = '\0'; + switch (e->uidf) { + case UID_NOT_PRESENT: + +@@ -1203,8 +1204,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + case UID_IS_CHARACTER: + +- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); +- uid[ACPIHID_UID_LEN - 1] = '\0'; 
++ memcpy(uid, &e->uid, e->uidl); ++ uid[e->uidl] = '\0'; + + break; + default: +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 451d417eb451..1c8df33404b0 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); + } + +@@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); + } + +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +index 5478a2ab45c4..54b5f61c8ed9 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -2236,8 +2236,6 @@ static int cxgb_up(struct adapter *adap) + #if IS_ENABLED(CONFIG_IPV6) + update_clip(adap); + #endif +- /* Initialize hash mac addr list*/ +- INIT_LIST_HEAD(&adap->mac_hlist); + return err; + + irq_err: +@@ -2259,6 +2257,7 @@ static void cxgb_down(struct adapter *adapter) + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); ++ + adapter->flags &= ~FULL_INIT_DONE; + } + +@@ -4789,6 +4788,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + (is_t5(adapter->params.chip) ? STATMODE_V(0) : + T6_STATMODE_V(0))); + ++ /* Initialize hash mac addr list */ ++ INIT_LIST_HEAD(&adapter->mac_hlist); ++ + for_each_port(adapter, i) { + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); +@@ -5067,6 +5069,7 @@ sriov: + static void remove_one(struct pci_dev *pdev) + { + struct adapter *adapter = pci_get_drvdata(pdev); ++ struct hash_mac_addr *entry, *tmp; + + if (!adapter) { + pci_release_regions(pdev); +@@ -5105,6 +5108,12 @@ static void remove_one(struct pci_dev *pdev) + if (adapter->num_uld || adapter->num_ofld_uld) + t4_uld_mem_free(adapter); + free_some_resources(adapter); ++ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, ++ list) { ++ list_del(&entry->list); ++ kfree(entry); ++ } ++ + #if IS_ENABLED(CONFIG_IPV6) + t4_cleanup_clip_tbl(adapter); + #endif +diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +index 9eb3071b69a4..17db5be9d2b7 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +@@ -719,9 +719,6 @@ static int adapter_up(struct adapter *adapter) + if (adapter->flags & USING_MSIX) + name_msix_vecs(adapter); + +- /* Initialize hash mac addr list*/ +- INIT_LIST_HEAD(&adapter->mac_hlist); +- + adapter->flags |= FULL_INIT_DONE; + } + +@@ -2902,6 +2899,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, + if (err) + goto err_unmap_bar; + ++ /* Initialize hash mac addr list */ ++ INIT_LIST_HEAD(&adapter->mac_hlist); ++ + /* + * Allocate our "adapter ports" and stitch everything together. 
+ */ +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 7e35bd665630..90eab0521be1 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -3395,7 +3395,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + +- ring->tail = hw->hw_addr + E1000_TDT(reg_idx); ++ ring->tail = adapter->io_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + +@@ -3734,7 +3734,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ +- ring->tail = hw->hw_addr + E1000_RDT(reg_idx); ++ ring->tail = adapter->io_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index a9e8a7356c41..fe844888e0ed 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1108,11 +1108,11 @@ static struct genl_family gtp_genl_family = { + }; + + static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, +- u32 type, struct pdp_ctx *pctx) ++ int flags, u32 type, struct pdp_ctx *pctx) + { + void *genlh; + +- genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, ++ genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, flags, + type); + if (genlh == NULL) + goto nlmsg_failure; +@@ -1208,8 +1208,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) + goto err_unlock; + } + +- err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, +- info->snd_seq, info->nlhdr->nlmsg_type, pctx); ++ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, ++ 0, info->nlhdr->nlmsg_type, pctx); + if (err < 0) + goto err_unlock_free; + +@@ -1252,6 +1252,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, ++ NLM_F_MULTI, + cb->nlh->nlmsg_type, pctx)) { + cb->args[0] = i; + cb->args[1] = j; +diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c +index 0c46ada027cf..e90ecb179622 100644 +--- a/drivers/nvdimm/btt.c ++++ b/drivers/nvdimm/btt.c +@@ -447,9 +447,9 @@ static int btt_log_init(struct arena_info *arena) + + static int btt_freelist_init(struct arena_info *arena) + { +- int old, new, ret; ++ int new, ret; + u32 i, map_entry; +- struct log_entry log_new, log_old; ++ struct log_entry log_new; + + arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), + GFP_KERNEL); +@@ -457,10 +457,6 @@ static int btt_freelist_init(struct arena_info *arena) + return -ENOMEM; + + for (i = 0; i < arena->nfree; i++) { +- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); +- if (old < 0) +- return old; +- + new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); + if (new < 0) + return new; +diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c +index bee2115ecf10..ec7482c7e7eb 100644 +--- a/drivers/platform/x86/alienware-wmi.c ++++ b/drivers/platform/x86/alienware-wmi.c +@@ -504,23 +504,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, + + input.length = (acpi_size) sizeof(*in_args); + input.pointer = in_args; +- if (out_data != NULL) { ++ if (out_data) { + output.length = ACPI_ALLOCATE_BUFFER; + output.pointer = NULL; + status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1, + command, &input, &output); +- } else ++ if (ACPI_SUCCESS(status)) { ++ obj = 
(union acpi_object *)output.pointer; ++ if (obj && obj->type == ACPI_TYPE_INTEGER) ++ *out_data = (u32)obj->integer.value; ++ } ++ kfree(output.pointer); ++ } else { + status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1, + command, &input, NULL); +- +- if (ACPI_SUCCESS(status) && out_data != NULL) { +- obj = (union acpi_object *)output.pointer; +- if (obj && obj->type == ACPI_TYPE_INTEGER) +- *out_data = (u32) obj->integer.value; + } +- kfree(output.pointer); + return status; +- + } + + /* +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c +index 0fd7e40b86a0..8137aa343706 100644 +--- a/drivers/platform/x86/asus-nb-wmi.c ++++ b/drivers/platform/x86/asus-nb-wmi.c +@@ -561,9 +561,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { + .detect_quirks = asus_nb_wmi_quirks, + }; + ++static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { ++ { ++ /* ++ * asus-nb-wm adds no functionality. The T100TA has a detachable ++ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading ++ * asus-nb-wm causes the camera LED to turn and _stay_ on. ++ */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), ++ }, ++ }, ++ { ++ /* The Asus T200TA has the same issue as the T100TA */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), ++ }, ++ }, ++ {} /* Terminating entry */ ++}; + + static int __init asus_nb_wmi_init(void) + { ++ if (dmi_check_system(asus_nb_wmi_blacklist)) ++ return -ENODEV; ++ + return asus_wmi_register_driver(&asus_nb_wmi_driver); + } + +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c +index 28c45db45aba..ebe8e8dc4677 100644 +--- a/drivers/rapidio/devices/rio_mport_cdev.c ++++ b/drivers/rapidio/devices/rio_mport_cdev.c +@@ -905,6 +905,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); + ret = -EFAULT; ++ /* ++ * Set nr_pages up to mean "how many pages to unpin, in ++ * the error handler: ++ */ ++ nr_pages = pinned; + goto err_pg; + } + +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c +index 2633d2bfb1b4..9ef9cbfd8926 100644 +--- a/drivers/staging/greybus/uart.c ++++ b/drivers/staging/greybus/uart.c +@@ -539,9 +539,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, + } + + if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) +- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; ++ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; + else +- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; ++ newline.flow_control = 0; + + if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { + memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); +diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c +index d1cb9b9cf22b..391cbcc4ed77 100644 +--- a/drivers/staging/iio/accel/sca3000_ring.c ++++ b/drivers/staging/iio/accel/sca3000_ring.c +@@ -56,7 +56,7 @@ static int sca3000_read_data(struct sca3000_state *st, + st->tx[0] = SCA3000_READ_REG(reg_address_high); + ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); + if (ret) { +- dev_err(get_device(&st->us->dev), "problem reading register"); ++ dev_err(&st->us->dev, "problem reading register"); + goto error_free_rx; + } + +diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c +index 598f0faa48c8..0f5eb2bf5f73 100644 
+--- a/drivers/staging/iio/resolver/ad2s1210.c ++++ b/drivers/staging/iio/resolver/ad2s1210.c +@@ -126,17 +126,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) + static int ad2s1210_config_read(struct ad2s1210_state *st, + unsigned char address) + { +- struct spi_transfer xfer = { +- .len = 2, +- .rx_buf = st->rx, +- .tx_buf = st->tx, ++ struct spi_transfer xfers[] = { ++ { ++ .len = 1, ++ .rx_buf = &st->rx[0], ++ .tx_buf = &st->tx[0], ++ .cs_change = 1, ++ }, { ++ .len = 1, ++ .rx_buf = &st->rx[1], ++ .tx_buf = &st->tx[1], ++ }, + }; + int ret = 0; + + ad2s1210_set_mode(MOD_CONFIG, st); + st->tx[0] = address | AD2S1210_MSB_IS_HIGH; + st->tx[1] = AD2S1210_REG_FAULT; +- ret = spi_sync_transfer(st->sdev, &xfer, 1); ++ ret = spi_sync_transfer(st->sdev, xfers, 2); + if (ret < 0) + return ret; + st->old_data = true; +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index 2e541a029657..e33d23c2f6ea 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1081,11 +1081,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, + + if (usb_endpoint_out(epaddr)) { + ep = dev->ep_out[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum != 0) + dev->ep_out[epnum] = NULL; + } else { + ep = dev->ep_in[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum != 0) + dev->ep_in[epnum] = NULL; + } + if (ep) { +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c +index 9e17d933ea94..3167f276c4c2 100644 +--- a/drivers/watchdog/watchdog_dev.c ++++ b/drivers/watchdog/watchdog_dev.c +@@ -38,7 +38,6 @@ + #include /* For __init/__exit/... */ + #include /* For timeout functions */ + #include /* For printk/panic/... */ +-#include /* For data references */ + #include /* For handling misc devices */ + #include /* For module stuff/... */ + #include /* For mutexes */ +@@ -53,14 +52,14 @@ + + /* + * struct watchdog_core_data - watchdog core internal data +- * @kref: Reference count. ++ * @dev: The watchdog's internal device + * @cdev: The watchdog's Character device. + * @wdd: Pointer to watchdog device. + * @lock: Lock for watchdog core. + * @status: Watchdog core internal status bits. 
+ */ + struct watchdog_core_data { +- struct kref kref; ++ struct device dev; + struct cdev cdev; + struct watchdog_device *wdd; + struct mutex lock; +@@ -794,7 +793,7 @@ static int watchdog_open(struct inode *inode, struct file *file) + file->private_data = wd_data; + + if (!hw_running) +- kref_get(&wd_data->kref); ++ get_device(&wd_data->dev); + + /* dev/watchdog is a virtual (and thus non-seekable) filesystem */ + return nonseekable_open(inode, file); +@@ -806,11 +805,11 @@ out_clear: + return err; + } + +-static void watchdog_core_data_release(struct kref *kref) ++static void watchdog_core_data_release(struct device *dev) + { + struct watchdog_core_data *wd_data; + +- wd_data = container_of(kref, struct watchdog_core_data, kref); ++ wd_data = container_of(dev, struct watchdog_core_data, dev); + + kfree(wd_data); + } +@@ -870,7 +869,7 @@ done: + */ + if (!running) { + module_put(wd_data->cdev.owner); +- kref_put(&wd_data->kref, watchdog_core_data_release); ++ put_device(&wd_data->dev); + } + return 0; + } +@@ -889,17 +888,22 @@ static struct miscdevice watchdog_miscdev = { + .fops = &watchdog_fops, + }; + ++static struct class watchdog_class = { ++ .name = "watchdog", ++ .owner = THIS_MODULE, ++ .dev_groups = wdt_groups, ++}; ++ + /* + * watchdog_cdev_register: register watchdog character device + * @wdd: watchdog device +- * @devno: character device number + * + * Register a watchdog character device including handling the legacy + * /dev/watchdog node. /dev/watchdog is actually a miscdevice and + * thus we set it up like that. + */ + +-static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) ++static int watchdog_cdev_register(struct watchdog_device *wdd) + { + struct watchdog_core_data *wd_data; + int err; +@@ -907,7 +911,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) + wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL); + if (!wd_data) + return -ENOMEM; +- kref_init(&wd_data->kref); + mutex_init(&wd_data->lock); + + wd_data->wdd = wdd; +@@ -934,23 +937,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) + } + } + ++ device_initialize(&wd_data->dev); ++ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); ++ wd_data->dev.class = &watchdog_class; ++ wd_data->dev.parent = wdd->parent; ++ wd_data->dev.groups = wdd->groups; ++ wd_data->dev.release = watchdog_core_data_release; ++ dev_set_drvdata(&wd_data->dev, wdd); ++ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); ++ + /* Fill in the data structures */ + cdev_init(&wd_data->cdev, &watchdog_fops); +- wd_data->cdev.owner = wdd->ops->owner; + + /* Add the device */ +- err = cdev_add(&wd_data->cdev, devno, 1); ++ err = cdev_device_add(&wd_data->cdev, &wd_data->dev); + if (err) { + pr_err("watchdog%d unable to add device %d:%d\n", + wdd->id, MAJOR(watchdog_devt), wdd->id); + if (wdd->id == 0) { + misc_deregister(&watchdog_miscdev); + old_wd_data = NULL; +- kref_put(&wd_data->kref, watchdog_core_data_release); ++ put_device(&wd_data->dev); + } + return err; + } + ++ wd_data->cdev.owner = wdd->ops->owner; ++ + /* Record time of most recent heartbeat as 'just before now'. 
*/ + wd_data->last_hw_keepalive = jiffies - 1; + +@@ -960,7 +973,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) + */ + if (watchdog_hw_running(wdd)) { + __module_get(wdd->ops->owner); +- kref_get(&wd_data->kref); ++ get_device(&wd_data->dev); + queue_delayed_work(watchdog_wq, &wd_data->work, 0); + } + +@@ -979,7 +992,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) + { + struct watchdog_core_data *wd_data = wdd->wd_data; + +- cdev_del(&wd_data->cdev); ++ cdev_device_del(&wd_data->cdev, &wd_data->dev); + if (wdd->id == 0) { + misc_deregister(&watchdog_miscdev); + old_wd_data = NULL; +@@ -992,15 +1005,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) + + cancel_delayed_work_sync(&wd_data->work); + +- kref_put(&wd_data->kref, watchdog_core_data_release); ++ put_device(&wd_data->dev); + } + +-static struct class watchdog_class = { +- .name = "watchdog", +- .owner = THIS_MODULE, +- .dev_groups = wdt_groups, +-}; +- + /* + * watchdog_dev_register: register a watchdog device + * @wdd: watchdog device +@@ -1012,27 +1019,14 @@ static struct class watchdog_class = { + + int watchdog_dev_register(struct watchdog_device *wdd) + { +- struct device *dev; +- dev_t devno; + int ret; + +- devno = MKDEV(MAJOR(watchdog_devt), wdd->id); +- +- ret = watchdog_cdev_register(wdd, devno); ++ ret = watchdog_cdev_register(wdd); + if (ret) + return ret; + +- dev = device_create_with_groups(&watchdog_class, wdd->parent, +- devno, wdd, wdd->groups, +- "watchdog%d", wdd->id); +- if (IS_ERR(dev)) { +- watchdog_cdev_unregister(wdd); +- return PTR_ERR(dev); +- } +- + ret = watchdog_register_pretimeout(wdd); + if (ret) { +- device_destroy(&watchdog_class, devno); + watchdog_cdev_unregister(wdd); + } + +@@ -1050,7 +1044,6 @@ int watchdog_dev_register(struct watchdog_device *wdd) + void watchdog_dev_unregister(struct watchdog_device *wdd) + { + watchdog_unregister_pretimeout(wdd); +- device_destroy(&watchdog_class, wdd->wd_data->cdev.dev); + watchdog_cdev_unregister(wdd); + } + +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 617e9ae67f50..e11aacb35d6b 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -3394,6 +3394,7 @@ retry: + WARN_ON(1); + tsession = NULL; + target = -1; ++ mutex_lock(&session->s_mutex); + } + goto retry; + +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index c2ef617d2f97..c875f246cb0e 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1537,6 +1537,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) + spin_lock(&configfs_dirent_lock); + configfs_detach_rollback(dentry); + spin_unlock(&configfs_dirent_lock); ++ config_item_put(parent_item); + return -EINTR; + } + frag->frag_dead = true; +diff --git a/fs/file.c b/fs/file.c +index 09aac4d4729b..82d3f925bab3 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -89,7 +89,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, + */ + static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) + { +- unsigned int cpy, set; ++ size_t cpy, set; + + BUG_ON(nfdt->max_fds < ofdt->max_fds); + +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index adc1a97cfe96..efd44d5645d8 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -548,9 +548,6 @@ __acquires(&gl->gl_lockref.lock) + goto out_unlock; + if (nonblock) + goto out_sched; +- smp_mb(); +- if (atomic_read(&gl->gl_revokes) != 0) +- goto out_sched; + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target 
= gl->gl_demote_state; +diff --git a/include/linux/net.h b/include/linux/net.h +index cd0c8bd0a1de..54270c4707cf 100644 +--- a/include/linux/net.h ++++ b/include/linux/net.h +@@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, + int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); + int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); + ++/* Routine returns the IP overhead imposed by a (caller-protected) socket. */ ++u32 kernel_sock_ip_overhead(struct sock *sk); ++ + #define MODULE_ALIAS_NETPROTO(proto) \ + MODULE_ALIAS("net-pf-" __stringify(proto)) + +diff --git a/include/linux/padata.h b/include/linux/padata.h +index 0f9e567d5e15..3afa17ed59da 100644 +--- a/include/linux/padata.h ++++ b/include/linux/padata.h +@@ -24,7 +24,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -37,6 +36,7 @@ + * @list: List entry, to attach to the padata lists. + * @pd: Pointer to the internal control structure. + * @cb_cpu: Callback cpu for serializatioon. ++ * @cpu: Cpu for parallelization. + * @seq_nr: Sequence number of the parallelized data object. + * @info: Used to pass information from the parallel to the serial function. + * @parallel: Parallel execution function. +@@ -46,6 +46,7 @@ struct padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; ++ int cpu; + int info; + void (*parallel)(struct padata_priv *padata); + void (*serial)(struct padata_priv *padata); +@@ -83,7 +84,6 @@ struct padata_serial_queue { + * @serial: List to wait for serialization after reordering. + * @pwork: work struct for parallelization. + * @swork: work struct for serialization. +- * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. + * @num_obj: Number of objects that are processed by this cpu. + * @cpu_index: Index of the cpu. +@@ -91,7 +91,6 @@ struct padata_serial_queue { + struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; +- struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; +@@ -118,10 +117,10 @@ struct padata_cpumask { + * @reorder_objects: Number of objects waiting in the reorder queues. + * @refcnt: Number of objects holding a reference on this parallel_data. + * @max_seq_nr: Maximal used sequence number. ++ * @cpu: Next CPU to be processed. + * @cpumask: The cpumasks in use for parallel and serial workers. ++ * @reorder_work: work struct for reordering. + * @lock: Reorder lock. +- * @processed: Number of already processed objects. +- * @timer: Reorder timer. + */ + struct parallel_data { + struct padata_instance *pinst; +@@ -130,10 +129,10 @@ struct parallel_data { + atomic_t reorder_objects; + atomic_t refcnt; + atomic_t seq_nr; ++ int cpu; + struct padata_cpumask cpumask; ++ struct work_struct reorder_work; + spinlock_t lock ____cacheline_aligned; +- unsigned int processed; +- struct timer_list timer; + }; + + /** +diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h +index 4bd1f55d6377..6418c4d10241 100644 +--- a/include/uapi/linux/if_pppol2tp.h ++++ b/include/uapi/linux/if_pppol2tp.h +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + /* Structure used to connect() the socket to a particular tunnel UDP + * socket over IPv4. 
+@@ -90,14 +91,12 @@ enum { + PPPOL2TP_SO_REORDERTO = 5, + }; + +-/* Debug message categories for the DEBUG socket option */ ++/* Debug message categories for the DEBUG socket option (deprecated) */ + enum { +- PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if +- * compiled in) */ +- PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel +- * interface */ +- PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ +- PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */ ++ PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG, ++ PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL, ++ PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ, ++ PPPOL2TP_MSG_DATA = L2TP_MSG_DATA, + }; + + +diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h +index 4bd27d0270a2..bb2d62037037 100644 +--- a/include/uapi/linux/l2tp.h ++++ b/include/uapi/linux/l2tp.h +@@ -108,7 +108,7 @@ enum { + L2TP_ATTR_VLAN_ID, /* u16 */ + L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */ + L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */ +- L2TP_ATTR_DEBUG, /* u32 */ ++ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */ + L2TP_ATTR_RECV_SEQ, /* u8 */ + L2TP_ATTR_SEND_SEQ, /* u8 */ + L2TP_ATTR_LNS_MODE, /* u8 */ +@@ -175,6 +175,21 @@ enum l2tp_seqmode { + L2TP_SEQ_ALL = 2, + }; + ++/** ++ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions ++ * ++ * @L2TP_MSG_DEBUG: verbose debug (if compiled in) ++ * @L2TP_MSG_CONTROL: userspace - kernel interface ++ * @L2TP_MSG_SEQ: sequence numbers ++ * @L2TP_MSG_DATA: data packets ++ */ ++enum l2tp_debug_flags { ++ L2TP_MSG_DEBUG = (1 << 0), ++ L2TP_MSG_CONTROL = (1 << 1), ++ L2TP_MSG_SEQ = (1 << 2), ++ L2TP_MSG_DATA = (1 << 3), ++}; ++ + /* + * NETLINK_GENERIC related info + */ +diff --git a/kernel/padata.c b/kernel/padata.c +index 6939111b3cbe..e82f066d63ac 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -66,15 +66,11 @@ static int padata_cpu_hash(struct parallel_data *pd) + static void padata_parallel_worker(struct work_struct *parallel_work) + { + struct padata_parallel_queue *pqueue; +- struct parallel_data *pd; +- struct padata_instance *pinst; + LIST_HEAD(local_list); + + local_bh_disable(); + pqueue = container_of(parallel_work, + struct padata_parallel_queue, work); +- pd = pqueue->pd; +- pinst = pd->pinst; + + spin_lock(&pqueue->parallel.lock); + list_replace_init(&pqueue->parallel.list, &local_list); +@@ -137,6 +133,7 @@ int padata_do_parallel(struct padata_instance *pinst, + padata->cb_cpu = cb_cpu; + + target_cpu = padata_cpu_hash(pd); ++ padata->cpu = target_cpu; + queue = per_cpu_ptr(pd->pqueue, target_cpu); + + spin_lock(&queue->parallel.lock); +@@ -160,8 +157,6 @@ EXPORT_SYMBOL(padata_do_parallel); + * A pointer to the control struct of the next object that needs + * serialization, if present in one of the percpu reorder queues. + * +- * NULL, if all percpu reorder queues are empty. +- * + * -EINPROGRESS, if the next object that needs serialization will + * be parallel processed by another cpu and is not yet present in + * the cpu's reorder queue. +@@ -171,25 +166,12 @@ EXPORT_SYMBOL(padata_do_parallel); + */ + static struct padata_priv *padata_get_next(struct parallel_data *pd) + { +- int cpu, num_cpus; +- unsigned int next_nr, next_index; + struct padata_parallel_queue *next_queue; + struct padata_priv *padata; + struct padata_list *reorder; ++ int cpu = pd->cpu; + +- num_cpus = cpumask_weight(pd->cpumask.pcpu); +- +- /* +- * Calculate the percpu reorder queue and the sequence +- * number of the next object. 
+- */ +- next_nr = pd->processed; +- next_index = next_nr % num_cpus; +- cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->pqueue, cpu); +- +- padata = NULL; +- + reorder = &next_queue->reorder; + + spin_lock(&reorder->lock); +@@ -200,7 +182,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) + list_del_init(&padata->list); + atomic_dec(&pd->reorder_objects); + +- pd->processed++; ++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, ++ false); + + spin_unlock(&reorder->lock); + goto out; +@@ -223,6 +206,7 @@ static void padata_reorder(struct parallel_data *pd) + struct padata_priv *padata; + struct padata_serial_queue *squeue; + struct padata_instance *pinst = pd->pinst; ++ struct padata_parallel_queue *next_queue; + + /* + * We need to ensure that only one cpu can work on dequeueing of +@@ -241,12 +225,11 @@ static void padata_reorder(struct parallel_data *pd) + padata = padata_get_next(pd); + + /* +- * All reorder queues are empty, or the next object that needs +- * serialization is parallel processed by another cpu and is +- * still on it's way to the cpu's reorder queue, nothing to +- * do for now. ++ * If the next object that needs serialization is parallel ++ * processed by another cpu and is still on it's way to the ++ * cpu's reorder queue, nothing to do for now. + */ +- if (!padata || PTR_ERR(padata) == -EINPROGRESS) ++ if (PTR_ERR(padata) == -EINPROGRESS) + break; + + /* +@@ -255,7 +238,6 @@ static void padata_reorder(struct parallel_data *pd) + * so exit immediately. + */ + if (PTR_ERR(padata) == -ENODATA) { +- del_timer(&pd->timer); + spin_unlock_bh(&pd->lock); + return; + } +@@ -274,28 +256,27 @@ static void padata_reorder(struct parallel_data *pd) + + /* + * The next object that needs serialization might have arrived to +- * the reorder queues in the meantime, we will be called again +- * from the timer function if no one else cares for it. ++ * the reorder queues in the meantime. + * +- * Ensure reorder_objects is read after pd->lock is dropped so we see +- * an increment from another task in padata_do_serial. Pairs with ++ * Ensure reorder queue is read after pd->lock is dropped so we see ++ * new objects from another task in padata_do_serial. Pairs with + * smp_mb__after_atomic in padata_do_serial. 
+ */ + smp_mb(); +- if (atomic_read(&pd->reorder_objects) +- && !(pinst->flags & PADATA_RESET)) +- mod_timer(&pd->timer, jiffies + HZ); +- else +- del_timer(&pd->timer); + +- return; ++ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); ++ if (!list_empty(&next_queue->reorder.list)) ++ queue_work(pinst->wq, &pd->reorder_work); + } + +-static void padata_reorder_timer(unsigned long arg) ++static void invoke_padata_reorder(struct work_struct *work) + { +- struct parallel_data *pd = (struct parallel_data *)arg; ++ struct parallel_data *pd; + ++ local_bh_disable(); ++ pd = container_of(work, struct parallel_data, reorder_work); + padata_reorder(pd); ++ local_bh_enable(); + } + + static void padata_serial_worker(struct work_struct *serial_work) +@@ -342,29 +323,22 @@ static void padata_serial_worker(struct work_struct *serial_work) + */ + void padata_do_serial(struct padata_priv *padata) + { +- int cpu; +- struct padata_parallel_queue *pqueue; +- struct parallel_data *pd; +- +- pd = padata->pd; +- +- cpu = get_cpu(); +- pqueue = per_cpu_ptr(pd->pqueue, cpu); ++ struct parallel_data *pd = padata->pd; ++ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, ++ padata->cpu); + + spin_lock(&pqueue->reorder.lock); +- atomic_inc(&pd->reorder_objects); + list_add_tail(&padata->list, &pqueue->reorder.list); ++ atomic_inc(&pd->reorder_objects); + spin_unlock(&pqueue->reorder.lock); + + /* +- * Ensure the atomic_inc of reorder_objects above is ordered correctly ++ * Ensure the addition to the reorder list is ordered correctly + * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb + * in padata_reorder. + */ + smp_mb__after_atomic(); + +- put_cpu(); +- + padata_reorder(pd); + } + EXPORT_SYMBOL(padata_do_serial); +@@ -413,9 +387,14 @@ static void padata_init_pqueues(struct parallel_data *pd) + struct padata_parallel_queue *pqueue; + + cpu_index = 0; +- for_each_cpu(cpu, pd->cpumask.pcpu) { ++ for_each_possible_cpu(cpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); +- pqueue->pd = pd; ++ ++ if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { ++ pqueue->cpu_index = -1; ++ continue; ++ } ++ + pqueue->cpu_index = cpu_index; + cpu_index++; + +@@ -449,12 +428,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + + padata_init_pqueues(pd); + padata_init_squeues(pd); +- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); + atomic_set(&pd->seq_nr, -1); + atomic_set(&pd->reorder_objects, 0); + atomic_set(&pd->refcnt, 1); + pd->pinst = pinst; + spin_lock_init(&pd->lock); ++ pd->cpu = cpumask_first(pd->cpumask.pcpu); ++ INIT_WORK(&pd->reorder_work, invoke_padata_reorder); + + return pd; + +diff --git a/lib/Makefile b/lib/Makefile +index 452d2956a5a2..7a55c5205281 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -230,5 +230,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o + obj-$(CONFIG_UBSAN) += ubsan.o + + UBSAN_SANITIZE_ubsan.o := n ++KASAN_SANITIZE_ubsan.o := n ++CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN) + + obj-$(CONFIG_SBITMAP) += sbitmap.o +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 7c3da29fad8e..36c7f616294a 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -112,53 +112,19 @@ struct l2tp_net { + spinlock_t l2tp_session_hlist_lock; + }; + +-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); + + static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) + { + return sk->sk_user_data; + } + +-static inline struct l2tp_net *l2tp_pernet(struct net *net) ++static 
inline struct l2tp_net *l2tp_pernet(const struct net *net) + { + BUG_ON(!net); + + return net_generic(net, l2tp_net_id); + } + +-/* Tunnel reference counts. Incremented per session that is added to +- * the tunnel. +- */ +-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) +-{ +- atomic_inc(&tunnel->ref_count); +-} +- +-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) +-{ +- if (atomic_dec_and_test(&tunnel->ref_count)) +- l2tp_tunnel_free(tunnel); +-} +-#ifdef L2TP_REFCNT_DEBUG +-#define l2tp_tunnel_inc_refcount(_t) \ +-do { \ +- pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ +- __func__, __LINE__, (_t)->name, \ +- atomic_read(&_t->ref_count)); \ +- l2tp_tunnel_inc_refcount_1(_t); \ +-} while (0) +-#define l2tp_tunnel_dec_refcount(_t) \ +-do { \ +- pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ +- __func__, __LINE__, (_t)->name, \ +- atomic_read(&_t->ref_count)); \ +- l2tp_tunnel_dec_refcount_1(_t); \ +-} while (0) +-#else +-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) +-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) +-#endif +- + /* Session hash global list for L2TPv3. + * The session_id SHOULD be random according to RFC3931, but several + * L2TP implementations use incrementing session_ids. So we do a real +@@ -216,27 +182,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk) + sock_put(sk); + } + +-/* Lookup a session by id in the global session list +- */ +-static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) +-{ +- struct l2tp_net *pn = l2tp_pernet(net); +- struct hlist_head *session_list = +- l2tp_session_id_hash_2(pn, session_id); +- struct l2tp_session *session; +- +- rcu_read_lock_bh(); +- hlist_for_each_entry_rcu(session, session_list, global_hlist) { +- if (session->session_id == session_id) { +- rcu_read_unlock_bh(); +- return session; +- } +- } +- rcu_read_unlock_bh(); +- +- return NULL; +-} +- + /* Session hash list. + * The session_id SHOULD be random according to RFC2661, but several + * L2TP implementations (Cisco and Microsoft) use incrementing +@@ -249,38 +194,31 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) + return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; + } + +-/* Lookup a session by id +- */ +-struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id) ++/* Lookup a tunnel. A new reference is held on the returned tunnel. */ ++struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) + { +- struct hlist_head *session_list; +- struct l2tp_session *session; ++ const struct l2tp_net *pn = l2tp_pernet(net); ++ struct l2tp_tunnel *tunnel; + +- /* In L2TPv3, session_ids are unique over all tunnels and we +- * sometimes need to look them up before we know the +- * tunnel. 
+- */ +- if (tunnel == NULL) +- return l2tp_session_find_2(net, session_id); ++ rcu_read_lock_bh(); ++ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { ++ if (tunnel->tunnel_id == tunnel_id) { ++ l2tp_tunnel_inc_refcount(tunnel); ++ rcu_read_unlock_bh(); + +- session_list = l2tp_session_id_hash(tunnel, session_id); +- read_lock_bh(&tunnel->hlist_lock); +- hlist_for_each_entry(session, session_list, hlist) { +- if (session->session_id == session_id) { +- read_unlock_bh(&tunnel->hlist_lock); +- return session; ++ return tunnel; + } + } +- read_unlock_bh(&tunnel->hlist_lock); ++ rcu_read_unlock_bh(); + + return NULL; + } +-EXPORT_SYMBOL_GPL(l2tp_session_find); ++EXPORT_SYMBOL_GPL(l2tp_tunnel_get); + +-/* Like l2tp_session_find() but takes a reference on the returned session. ++/* Lookup a session. A new reference is held on the returned session. + * Optionally calls session->ref() too if do_ref is true. + */ +-struct l2tp_session *l2tp_session_get(struct net *net, ++struct l2tp_session *l2tp_session_get(const struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref) + { +@@ -355,7 +293,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth); + /* Lookup a session by interface name. + * This is very inefficient but is only used by management interfaces. + */ +-struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, ++struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, ++ const char *ifname, + bool do_ref) + { + struct l2tp_net *pn = l2tp_pernet(net); +@@ -382,20 +321,28 @@ struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + } + EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); + +-static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, +- struct l2tp_session *session) ++int l2tp_session_register(struct l2tp_session *session, ++ struct l2tp_tunnel *tunnel) + { + struct l2tp_session *session_walk; + struct hlist_head *g_head; + struct hlist_head *head; + struct l2tp_net *pn; ++ int err; + + head = l2tp_session_id_hash(tunnel, session->session_id); + + write_lock_bh(&tunnel->hlist_lock); ++ if (!tunnel->acpt_newsess) { ++ err = -ENODEV; ++ goto err_tlock; ++ } ++ + hlist_for_each_entry(session_walk, head, hlist) +- if (session_walk->session_id == session->session_id) +- goto exist; ++ if (session_walk->session_id == session->session_id) { ++ err = -EEXIST; ++ goto err_tlock; ++ } + + if (tunnel->version == L2TP_HDR_VER_3) { + pn = l2tp_pernet(tunnel->l2tp_net); +@@ -403,30 +350,44 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, + session->session_id); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); ++ + hlist_for_each_entry(session_walk, g_head, global_hlist) +- if (session_walk->session_id == session->session_id) +- goto exist_glob; ++ if (session_walk->session_id == session->session_id) { ++ err = -EEXIST; ++ goto err_tlock_pnlock; ++ } + ++ l2tp_tunnel_inc_refcount(tunnel); ++ sock_hold(tunnel->sock); + hlist_add_head_rcu(&session->global_hlist, g_head); ++ + spin_unlock_bh(&pn->l2tp_session_hlist_lock); ++ } else { ++ l2tp_tunnel_inc_refcount(tunnel); ++ sock_hold(tunnel->sock); + } + + hlist_add_head(&session->hlist, head); + write_unlock_bh(&tunnel->hlist_lock); + ++ /* Ignore management session in session count value */ ++ if (session->session_id != 0) ++ atomic_inc(&l2tp_session_count); ++ + return 0; + +-exist_glob: ++err_tlock_pnlock: + spin_unlock_bh(&pn->l2tp_session_hlist_lock); +-exist: ++err_tlock: + write_unlock_bh(&tunnel->hlist_lock); + +- return -EEXIST; ++ 
return err; + } ++EXPORT_SYMBOL_GPL(l2tp_session_register); + + /* Lookup a tunnel by id + */ +-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) ++struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id) + { + struct l2tp_tunnel *tunnel; + struct l2tp_net *pn = l2tp_pernet(net); +@@ -444,7 +405,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) + } + EXPORT_SYMBOL_GPL(l2tp_tunnel_find); + +-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth) ++struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) + { + struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_tunnel *tunnel; +@@ -1307,7 +1268,6 @@ static void l2tp_tunnel_destruct(struct sock *sk) + /* Remove hooks into tunnel socket */ + sk->sk_destruct = tunnel->old_sk_destruct; + sk->sk_user_data = NULL; +- tunnel->sock = NULL; + + /* Remove the tunnel struct from the tunnel list */ + pn = l2tp_pernet(tunnel->l2tp_net); +@@ -1317,6 +1277,8 @@ static void l2tp_tunnel_destruct(struct sock *sk) + atomic_dec(&l2tp_tunnel_count); + + l2tp_tunnel_closeall(tunnel); ++ ++ tunnel->sock = NULL; + l2tp_tunnel_dec_refcount(tunnel); + + /* Call the original destructor */ +@@ -1341,6 +1303,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) + tunnel->name); + + write_lock_bh(&tunnel->hlist_lock); ++ tunnel->acpt_newsess = false; + for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { + again: + hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { +@@ -1394,17 +1357,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk) + } + } + +-/* Really kill the tunnel. +- * Come here only when all sessions have been cleared from the tunnel. +- */ +-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) +-{ +- BUG_ON(atomic_read(&tunnel->ref_count) != 0); +- BUG_ON(tunnel->sock != NULL); +- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); +- kfree_rcu(tunnel, rcu); +-} +- + /* Workqueue tunnel deletion function */ + static void l2tp_tunnel_del_work(struct work_struct *work) + { +@@ -1655,6 +1607,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 + tunnel->magic = L2TP_TUNNEL_MAGIC; + sprintf(&tunnel->name[0], "tunl %u", tunnel_id); + rwlock_init(&tunnel->hlist_lock); ++ tunnel->acpt_newsess = true; + + /* The net we belong to */ + tunnel->l2tp_net = net; +@@ -1840,7 +1793,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); + struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) + { + struct l2tp_session *session; +- int err; + + session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); + if (session != NULL) { +@@ -1895,25 +1847,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn + + l2tp_session_set_header_len(session, tunnel->version); + +- err = l2tp_session_add_to_tunnel(tunnel, session); +- if (err) { +- kfree(session); +- +- return ERR_PTR(err); +- } +- +- /* Bump the reference count. The session context is deleted +- * only when this drops to zero. 
+- */ + l2tp_session_inc_refcount(session); +- l2tp_tunnel_inc_refcount(tunnel); +- +- /* Ensure tunnel socket isn't deleted */ +- sock_hold(tunnel->sock); +- +- /* Ignore management session in session count value */ +- if (session->session_id != 0) +- atomic_inc(&l2tp_session_count); + + return session; + } +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h +index 7c2037184b6c..2b9b6fb67ae9 100644 +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -23,16 +23,6 @@ + #define L2TP_HASH_BITS_2 8 + #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) + +-/* Debug message categories for the DEBUG socket option */ +-enum { +- L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if +- * compiled in) */ +- L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel +- * interface */ +- L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ +- L2TP_MSG_DATA = (1 << 3), /* data packets */ +-}; +- + struct sk_buff; + + struct l2tp_stats { +@@ -172,6 +162,10 @@ struct l2tp_tunnel { + + struct rcu_head rcu; + rwlock_t hlist_lock; /* protect session_hlist */ ++ bool acpt_newsess; /* Indicates whether this ++ * tunnel accepts new sessions. ++ * Protected by hlist_lock. ++ */ + struct hlist_head session_hlist[L2TP_HASH_SIZE]; + /* hashed list of sessions, + * hashed by id */ +@@ -207,7 +201,9 @@ struct l2tp_tunnel { + }; + + struct l2tp_nl_cmd_ops { +- int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); ++ int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel, ++ u32 session_id, u32 peer_session_id, ++ struct l2tp_session_cfg *cfg); + int (*session_delete)(struct l2tp_session *session); + }; + +@@ -241,18 +237,18 @@ out: + return tunnel; + } + +-struct l2tp_session *l2tp_session_get(struct net *net, ++struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); ++ ++struct l2tp_session *l2tp_session_get(const struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref); +-struct l2tp_session *l2tp_session_find(struct net *net, +- struct l2tp_tunnel *tunnel, +- u32 session_id); + struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref); +-struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, ++struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, ++ const char *ifname, + bool do_ref); +-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); +-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); ++struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id); ++struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); + + int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, + u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, +@@ -263,6 +259,9 @@ struct l2tp_session *l2tp_session_create(int priv_size, + struct l2tp_tunnel *tunnel, + u32 session_id, u32 peer_session_id, + struct l2tp_session_cfg *cfg); ++int l2tp_session_register(struct l2tp_session *session, ++ struct l2tp_tunnel *tunnel); ++ + void __l2tp_session_unhash(struct l2tp_session *session); + int l2tp_session_delete(struct l2tp_session *session); + void l2tp_session_free(struct l2tp_session *session); +@@ -281,6 +280,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, + void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); + int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); + ++static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) ++{ ++ 
atomic_inc(&tunnel->ref_count); ++} ++ ++static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) ++{ ++ if (atomic_dec_and_test(&tunnel->ref_count)) ++ kfree_rcu(tunnel, rcu); ++} ++ + /* Session reference counts. Incremented when code obtains a reference + * to a session. + */ +diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c +index eecc64e138de..8b8fc2337960 100644 +--- a/net/l2tp/l2tp_eth.c ++++ b/net/l2tp/l2tp_eth.c +@@ -30,6 +30,9 @@ + #include + #include + #include ++#include ++#include ++#include + + #include "l2tp_core.h" + +@@ -41,7 +44,6 @@ struct l2tp_eth { + struct net_device *dev; + struct sock *tunnel_sock; + struct l2tp_session *session; +- struct list_head list; + atomic_long_t tx_bytes; + atomic_long_t tx_packets; + atomic_long_t tx_dropped; +@@ -52,20 +54,9 @@ struct l2tp_eth { + + /* via l2tp_session_priv() */ + struct l2tp_eth_sess { +- struct net_device *dev; ++ struct net_device __rcu *dev; + }; + +-/* per-net private data for this module */ +-static unsigned int l2tp_eth_net_id; +-struct l2tp_eth_net { +- struct list_head l2tp_eth_dev_list; +- spinlock_t l2tp_eth_lock; +-}; +- +-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) +-{ +- return net_generic(net, l2tp_eth_net_id); +-} + + static int l2tp_eth_dev_init(struct net_device *dev) + { +@@ -82,12 +73,13 @@ static int l2tp_eth_dev_init(struct net_device *dev) + static void l2tp_eth_dev_uninit(struct net_device *dev) + { + struct l2tp_eth *priv = netdev_priv(dev); +- struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); ++ struct l2tp_eth_sess *spriv; + +- spin_lock(&pn->l2tp_eth_lock); +- list_del_init(&priv->list); +- spin_unlock(&pn->l2tp_eth_lock); +- dev_put(dev); ++ spriv = l2tp_session_priv(priv->session); ++ RCU_INIT_POINTER(spriv->dev, NULL); ++ /* No need for synchronize_net() here. We're called by ++ * unregister_netdev*(), which does the synchronisation for us. 
++ */ + } + + static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) +@@ -141,8 +133,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev) + static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) + { + struct l2tp_eth_sess *spriv = l2tp_session_priv(session); +- struct net_device *dev = spriv->dev; +- struct l2tp_eth *priv = netdev_priv(dev); ++ struct net_device *dev; ++ struct l2tp_eth *priv; + + if (session->debug & L2TP_MSG_DATA) { + unsigned int length; +@@ -166,16 +158,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, + skb_dst_drop(skb); + nf_reset(skb); + ++ rcu_read_lock(); ++ dev = rcu_dereference(spriv->dev); ++ if (!dev) ++ goto error_rcu; ++ ++ priv = netdev_priv(dev); + if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { + atomic_long_inc(&priv->rx_packets); + atomic_long_add(data_len, &priv->rx_bytes); + } else { + atomic_long_inc(&priv->rx_errors); + } ++ rcu_read_unlock(); ++ + return; + ++error_rcu: ++ rcu_read_unlock(); + error: +- atomic_long_inc(&priv->rx_errors); + kfree_skb(skb); + } + +@@ -186,11 +187,15 @@ static void l2tp_eth_delete(struct l2tp_session *session) + + if (session) { + spriv = l2tp_session_priv(session); +- dev = spriv->dev; ++ ++ rtnl_lock(); ++ dev = rtnl_dereference(spriv->dev); + if (dev) { +- unregister_netdev(dev); +- spriv->dev = NULL; ++ unregister_netdevice(dev); ++ rtnl_unlock(); + module_put(THIS_MODULE); ++ } else { ++ rtnl_unlock(); + } + } + } +@@ -200,35 +205,89 @@ static void l2tp_eth_show(struct seq_file *m, void *arg) + { + struct l2tp_session *session = arg; + struct l2tp_eth_sess *spriv = l2tp_session_priv(session); +- struct net_device *dev = spriv->dev; ++ struct net_device *dev; ++ ++ rcu_read_lock(); ++ dev = rcu_dereference(spriv->dev); ++ if (!dev) { ++ rcu_read_unlock(); ++ return; ++ } ++ dev_hold(dev); ++ rcu_read_unlock(); + + seq_printf(m, " interface %s\n", dev->name); ++ ++ dev_put(dev); + } + #endif + +-static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) ++static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, ++ struct l2tp_session *session, ++ struct net_device *dev) ++{ ++ unsigned int overhead = 0; ++ struct dst_entry *dst; ++ u32 l3_overhead = 0; ++ ++ /* if the encap is UDP, account for UDP header size */ ++ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { ++ overhead += sizeof(struct udphdr); ++ dev->needed_headroom += sizeof(struct udphdr); ++ } ++ if (session->mtu != 0) { ++ dev->mtu = session->mtu; ++ dev->needed_headroom += session->hdr_len; ++ return; ++ } ++ lock_sock(tunnel->sock); ++ l3_overhead = kernel_sock_ip_overhead(tunnel->sock); ++ release_sock(tunnel->sock); ++ if (l3_overhead == 0) { ++ /* L3 Overhead couldn't be identified, this could be ++ * because tunnel->sock was NULL or the socket's ++ * address family was not IPv4 or IPv6, ++ * dev mtu stays at 1500. ++ */ ++ return; ++ } ++ /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr ++ * UDP overhead, if any, was already factored in above. 
++ */ ++ overhead += session->hdr_len + ETH_HLEN + l3_overhead; ++ ++ /* If PMTU discovery was enabled, use discovered MTU on L2TP device */ ++ dst = sk_dst_get(tunnel->sock); ++ if (dst) { ++ /* dst_mtu will use PMTU if found, else fallback to intf MTU */ ++ u32 pmtu = dst_mtu(dst); ++ ++ if (pmtu != 0) ++ dev->mtu = pmtu; ++ dst_release(dst); ++ } ++ session->mtu = dev->mtu - overhead; ++ dev->mtu = session->mtu; ++ dev->needed_headroom += session->hdr_len; ++} ++ ++static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, ++ u32 session_id, u32 peer_session_id, ++ struct l2tp_session_cfg *cfg) + { + struct net_device *dev; + char name[IFNAMSIZ]; +- struct l2tp_tunnel *tunnel; + struct l2tp_session *session; + struct l2tp_eth *priv; + struct l2tp_eth_sess *spriv; + int rc; +- struct l2tp_eth_net *pn; +- +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- if (!tunnel) { +- rc = -ENODEV; +- goto out; +- } + + if (cfg->ifname) { + dev = dev_get_by_name(net, cfg->ifname); + if (dev) { + dev_put(dev); + rc = -EEXIST; +- goto out; ++ goto err; + } + strlcpy(name, cfg->ifname, IFNAMSIZ); + } else +@@ -238,26 +297,22 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p + peer_session_id, cfg); + if (IS_ERR(session)) { + rc = PTR_ERR(session); +- goto out; ++ goto err; + } + + dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN, + l2tp_eth_dev_setup); + if (!dev) { + rc = -ENOMEM; +- goto out_del_session; ++ goto err_sess; + } + + dev_net_set(dev, net); +- if (session->mtu == 0) +- session->mtu = dev->mtu - session->hdr_len; +- dev->mtu = session->mtu; +- dev->needed_headroom += session->hdr_len; ++ l2tp_eth_adjust_mtu(tunnel, session, dev); + + priv = netdev_priv(dev); + priv->dev = dev; + priv->session = session; +- INIT_LIST_HEAD(&priv->list); + + priv->tunnel_sock = tunnel->sock; + session->recv_skb = l2tp_eth_dev_recv; +@@ -267,48 +322,50 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p + #endif + + spriv = l2tp_session_priv(session); +- spriv->dev = dev; + +- rc = register_netdev(dev); +- if (rc < 0) +- goto out_del_dev; ++ l2tp_session_inc_refcount(session); + +- __module_get(THIS_MODULE); +- /* Must be done after register_netdev() */ +- strlcpy(session->ifname, dev->name, IFNAMSIZ); ++ rtnl_lock(); + +- dev_hold(dev); +- pn = l2tp_eth_pernet(dev_net(dev)); +- spin_lock(&pn->l2tp_eth_lock); +- list_add(&priv->list, &pn->l2tp_eth_dev_list); +- spin_unlock(&pn->l2tp_eth_lock); ++ /* Register both device and session while holding the rtnl lock. This ++ * ensures that l2tp_eth_delete() will see that there's a device to ++ * unregister, even if it happened to run before we assign spriv->dev. 
++ */ ++ rc = l2tp_session_register(session, tunnel); ++ if (rc < 0) { ++ rtnl_unlock(); ++ goto err_sess_dev; ++ } + +- return 0; ++ rc = register_netdevice(dev); ++ if (rc < 0) { ++ rtnl_unlock(); ++ l2tp_session_delete(session); ++ l2tp_session_dec_refcount(session); ++ free_netdev(dev); + +-out_del_dev: +- free_netdev(dev); +- spriv->dev = NULL; +-out_del_session: +- l2tp_session_delete(session); +-out: +- return rc; +-} ++ return rc; ++ } + +-static __net_init int l2tp_eth_init_net(struct net *net) +-{ +- struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); ++ strlcpy(session->ifname, dev->name, IFNAMSIZ); ++ rcu_assign_pointer(spriv->dev, dev); ++ ++ rtnl_unlock(); + +- INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); +- spin_lock_init(&pn->l2tp_eth_lock); ++ l2tp_session_dec_refcount(session); ++ ++ __module_get(THIS_MODULE); + + return 0; +-} + +-static struct pernet_operations l2tp_eth_net_ops = { +- .init = l2tp_eth_init_net, +- .id = &l2tp_eth_net_id, +- .size = sizeof(struct l2tp_eth_net), +-}; ++err_sess_dev: ++ l2tp_session_dec_refcount(session); ++ free_netdev(dev); ++err_sess: ++ kfree(session); ++err: ++ return rc; ++} + + + static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { +@@ -323,25 +380,18 @@ static int __init l2tp_eth_init(void) + + err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); + if (err) +- goto out; +- +- err = register_pernet_device(&l2tp_eth_net_ops); +- if (err) +- goto out_unreg; ++ goto err; + + pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); + + return 0; + +-out_unreg: +- l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); +-out: ++err: + return err; + } + + static void __exit l2tp_eth_exit(void) + { +- unregister_pernet_device(&l2tp_eth_net_ops); + l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); + } + +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c +index d6fccfdca201..47d7bdff8be8 100644 +--- a/net/l2tp/l2tp_netlink.c ++++ b/net/l2tp/l2tp_netlink.c +@@ -72,10 +72,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + (info->attrs[L2TP_ATTR_CONN_ID])) { + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); + session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- if (tunnel) ++ tunnel = l2tp_tunnel_get(net, tunnel_id); ++ if (tunnel) { + session = l2tp_session_get(net, tunnel, session_id, + do_ref); ++ l2tp_tunnel_dec_refcount(tunnel); ++ } + } + + return session; +@@ -278,8 +280,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info + } + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); + +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- if (tunnel == NULL) { ++ tunnel = l2tp_tunnel_get(net, tunnel_id); ++ if (!tunnel) { + ret = -ENODEV; + goto out; + } +@@ -289,6 +291,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info + + l2tp_tunnel_delete(tunnel); + ++ l2tp_tunnel_dec_refcount(tunnel); ++ + out: + return ret; + } +@@ -306,8 +310,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info + } + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); + +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- if (tunnel == NULL) { ++ tunnel = l2tp_tunnel_get(net, tunnel_id); ++ if (!tunnel) { + ret = -ENODEV; + goto out; + } +@@ -318,6 +322,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info + ret = l2tp_tunnel_notify(&l2tp_nl_family, info, + tunnel, L2TP_CMD_TUNNEL_MODIFY); + ++ l2tp_tunnel_dec_refcount(tunnel); ++ 
+ out: + return ret; + } +@@ -430,34 +436,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) + + if (!info->attrs[L2TP_ATTR_CONN_ID]) { + ret = -EINVAL; +- goto out; ++ goto err; + } + + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); + +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- if (tunnel == NULL) { +- ret = -ENODEV; +- goto out; +- } +- + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; +- goto out; ++ goto err; ++ } ++ ++ tunnel = l2tp_tunnel_get(net, tunnel_id); ++ if (!tunnel) { ++ ret = -ENODEV; ++ goto err_nlmsg; + } + + ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, + NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET); + if (ret < 0) +- goto err_out; ++ goto err_nlmsg_tunnel; ++ ++ l2tp_tunnel_dec_refcount(tunnel); + + return genlmsg_unicast(net, msg, info->snd_portid); + +-err_out: ++err_nlmsg_tunnel: ++ l2tp_tunnel_dec_refcount(tunnel); ++err_nlmsg: + nlmsg_free(msg); +- +-out: ++err: + return ret; + } + +@@ -501,8 +510,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + ret = -EINVAL; + goto out; + } ++ + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); +- tunnel = l2tp_tunnel_find(net, tunnel_id); ++ tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { + ret = -ENODEV; + goto out; +@@ -510,29 +520,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + + if (!info->attrs[L2TP_ATTR_SESSION_ID]) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); +- session = l2tp_session_find(net, tunnel, session_id); +- if (session) { +- ret = -EEXIST; +- goto out; +- } + + if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); + + if (!info->attrs[L2TP_ATTR_PW_TYPE]) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); + if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + + if (tunnel->version > 2) { +@@ -551,7 +556,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); + if (len > 8) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + cfg.cookie_len = len; + memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); +@@ -560,7 +565,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); + if (len > 8) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + cfg.peer_cookie_len = len; + memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); +@@ -603,7 +608,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || + (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { + ret = -EPROTONOSUPPORT; +- goto out; ++ goto out_tunnel; + } + + /* Check that pseudowire-specific params are present */ +@@ -613,7 +618,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + case L2TP_PWTYPE_ETH_VLAN: + if (!info->attrs[L2TP_ATTR_VLAN_ID]) { + ret = -EINVAL; +- goto out; ++ goto out_tunnel; + } + break; + case L2TP_PWTYPE_ETH: +@@ -627,10 +632,10 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + 
break; + } + +- ret = -EPROTONOSUPPORT; +- if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) +- ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, +- session_id, peer_session_id, &cfg); ++ ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel, ++ session_id, ++ peer_session_id, ++ &cfg); + + if (ret >= 0) { + session = l2tp_session_get(net, tunnel, session_id, false); +@@ -641,6 +646,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf + } + } + ++out_tunnel: ++ l2tp_tunnel_dec_refcount(tunnel); + out: + return ret; + } +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c +index d919b3e6b548..979fa868a4f1 100644 +--- a/net/l2tp/l2tp_ppp.c ++++ b/net/l2tp/l2tp_ppp.c +@@ -122,8 +122,11 @@ + struct pppol2tp_session { + int owner; /* pid that opened the socket */ + +- struct sock *sock; /* Pointer to the session ++ struct mutex sk_lock; /* Protects .sk */ ++ struct sock __rcu *sk; /* Pointer to the session + * PPPoX socket */ ++ struct sock *__sk; /* Copy of .sk, for cleanup */ ++ struct rcu_head rcu; /* For asynchronous release */ + struct sock *tunnel_sock; /* Pointer to the tunnel UDP + * socket */ + int flags; /* accessed by PPPIOCGFLAGS. +@@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = { + + static const struct proto_ops pppol2tp_ops; + ++/* Retrieves the pppol2tp socket associated to a session. ++ * A reference is held on the returned socket, so this function must be paired ++ * with sock_put(). ++ */ ++static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session) ++{ ++ struct pppol2tp_session *ps = l2tp_session_priv(session); ++ struct sock *sk; ++ ++ rcu_read_lock(); ++ sk = rcu_dereference(ps->sk); ++ if (sk) ++ sock_hold(sk); ++ rcu_read_unlock(); ++ ++ return sk; ++} ++ + /* Helpers to obtain tunnel/session contexts from sockets. + */ + static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) +@@ -224,21 +245,22 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int + /* If the socket is bound, send it in to PPP's input queue. Otherwise + * queue it on the session socket. 
+ */ +- sk = ps->sock; ++ rcu_read_lock(); ++ sk = rcu_dereference(ps->sk); + if (sk == NULL) + goto no_sock; + + if (sk->sk_state & PPPOX_BOUND) { + struct pppox_sock *po; + +- l2tp_dbg(session, PPPOL2TP_MSG_DATA, ++ l2tp_dbg(session, L2TP_MSG_DATA, + "%s: recv %d byte data frame, passing to ppp\n", + session->name, data_len); + + po = pppox_sk(sk); + ppp_input(&po->chan, skb); + } else { +- l2tp_dbg(session, PPPOL2TP_MSG_DATA, ++ l2tp_dbg(session, L2TP_MSG_DATA, + "%s: recv %d byte data frame, passing to L2TP socket\n", + session->name, data_len); + +@@ -247,30 +269,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int + kfree_skb(skb); + } + } ++ rcu_read_unlock(); + + return; + + no_sock: +- l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); ++ rcu_read_unlock(); ++ l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name); + kfree_skb(skb); + } + +-static void pppol2tp_session_sock_hold(struct l2tp_session *session) +-{ +- struct pppol2tp_session *ps = l2tp_session_priv(session); +- +- if (ps->sock) +- sock_hold(ps->sock); +-} +- +-static void pppol2tp_session_sock_put(struct l2tp_session *session) +-{ +- struct pppol2tp_session *ps = l2tp_session_priv(session); +- +- if (ps->sock) +- sock_put(ps->sock); +-} +- + /************************************************************************ + * Transmit handling + ***********************************************************************/ +@@ -431,17 +439,16 @@ abort: + */ + static void pppol2tp_session_close(struct l2tp_session *session) + { +- struct pppol2tp_session *ps = l2tp_session_priv(session); +- struct sock *sk = ps->sock; +- struct socket *sock = sk->sk_socket; ++ struct sock *sk; + + BUG_ON(session->magic != L2TP_SESSION_MAGIC); + +- if (sock) +- inet_shutdown(sock, SEND_SHUTDOWN); +- +- /* Don't let the session go away before our socket does */ +- l2tp_session_inc_refcount(session); ++ sk = pppol2tp_session_get_sock(session); ++ if (sk) { ++ if (sk->sk_socket) ++ inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); ++ sock_put(sk); ++ } + } + + /* Really kill the session socket. (Called from sock_put() if +@@ -461,6 +468,14 @@ static void pppol2tp_session_destruct(struct sock *sk) + } + } + ++static void pppol2tp_put_sk(struct rcu_head *head) ++{ ++ struct pppol2tp_session *ps; ++ ++ ps = container_of(head, typeof(*ps), rcu); ++ sock_put(ps->__sk); ++} ++ + /* Called when the PPPoX socket (session) is closed. + */ + static int pppol2tp_release(struct socket *sock) +@@ -486,11 +501,23 @@ static int pppol2tp_release(struct socket *sock) + + session = pppol2tp_sock_to_session(sk); + +- /* Purge any queued data */ + if (session != NULL) { +- __l2tp_session_unhash(session); +- l2tp_session_queue_purge(session); +- sock_put(sk); ++ struct pppol2tp_session *ps; ++ ++ l2tp_session_delete(session); ++ ++ ps = l2tp_session_priv(session); ++ mutex_lock(&ps->sk_lock); ++ ps->__sk = rcu_dereference_protected(ps->sk, ++ lockdep_is_held(&ps->sk_lock)); ++ RCU_INIT_POINTER(ps->sk, NULL); ++ mutex_unlock(&ps->sk_lock); ++ call_rcu(&ps->rcu, pppol2tp_put_sk); ++ ++ /* Rely on the sock_put() call at the end of the function for ++ * dropping the reference held by pppol2tp_sock_to_session(). ++ * The last reference will be dropped by pppol2tp_put_sk(). 
++ */ + } + release_sock(sk); + +@@ -557,16 +584,47 @@ out: + static void pppol2tp_show(struct seq_file *m, void *arg) + { + struct l2tp_session *session = arg; +- struct pppol2tp_session *ps = l2tp_session_priv(session); ++ struct sock *sk; ++ ++ sk = pppol2tp_session_get_sock(session); ++ if (sk) { ++ struct pppox_sock *po = pppox_sk(sk); + +- if (ps) { +- struct pppox_sock *po = pppox_sk(ps->sock); +- if (po) +- seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); ++ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); ++ sock_put(sk); + } + } + #endif + ++static void pppol2tp_session_init(struct l2tp_session *session) ++{ ++ struct pppol2tp_session *ps; ++ struct dst_entry *dst; ++ ++ session->recv_skb = pppol2tp_recv; ++ session->session_close = pppol2tp_session_close; ++#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) ++ session->show = pppol2tp_show; ++#endif ++ ++ ps = l2tp_session_priv(session); ++ mutex_init(&ps->sk_lock); ++ ps->tunnel_sock = session->tunnel->sock; ++ ps->owner = current->pid; ++ ++ /* If PMTU discovery was enabled, use the MTU that was discovered */ ++ dst = sk_dst_get(session->tunnel->sock); ++ if (dst) { ++ u32 pmtu = dst_mtu(dst); ++ ++ if (pmtu) { ++ session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD; ++ session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD; ++ } ++ dst_release(dst); ++ } ++} ++ + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket + */ + static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, +@@ -578,7 +636,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + struct l2tp_session *session = NULL; + struct l2tp_tunnel *tunnel; + struct pppol2tp_session *ps; +- struct dst_entry *dst; + struct l2tp_session_cfg cfg = { 0, }; + int error = 0; + u32 tunnel_id, peer_tunnel_id; +@@ -700,13 +757,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + /* Using a pre-existing session is fine as long as it hasn't + * been connected yet. + */ +- if (ps->sock) { ++ mutex_lock(&ps->sk_lock); ++ if (rcu_dereference_protected(ps->sk, ++ lockdep_is_held(&ps->sk_lock))) { ++ mutex_unlock(&ps->sk_lock); + error = -EEXIST; + goto end; + } + + /* consistency checks */ + if (ps->tunnel_sock != tunnel->sock) { ++ mutex_unlock(&ps->sk_lock); + error = -EEXIST; + goto end; + } +@@ -722,35 +783,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + error = PTR_ERR(session); + goto end; + } +- } +- +- /* Associate session with its PPPoL2TP socket */ +- ps = l2tp_session_priv(session); +- ps->owner = current->pid; +- ps->sock = sk; +- ps->tunnel_sock = tunnel->sock; + +- session->recv_skb = pppol2tp_recv; +- session->session_close = pppol2tp_session_close; +-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) +- session->show = pppol2tp_show; +-#endif +- +- /* We need to know each time a skb is dropped from the reorder +- * queue. 
+- */ +- session->ref = pppol2tp_session_sock_hold; +- session->deref = pppol2tp_session_sock_put; +- +- /* If PMTU discovery was enabled, use the MTU that was discovered */ +- dst = sk_dst_get(tunnel->sock); +- if (dst != NULL) { +- u32 pmtu = dst_mtu(dst); ++ pppol2tp_session_init(session); ++ ps = l2tp_session_priv(session); ++ l2tp_session_inc_refcount(session); + +- if (pmtu != 0) +- session->mtu = session->mru = pmtu - +- PPPOL2TP_HEADER_OVERHEAD; +- dst_release(dst); ++ mutex_lock(&ps->sk_lock); ++ error = l2tp_session_register(session, tunnel); ++ if (error < 0) { ++ mutex_unlock(&ps->sk_lock); ++ kfree(session); ++ goto end; ++ } ++ drop_refcnt = true; + } + + /* Special case: if source & dest session_id == 0x0000, this +@@ -775,14 +820,25 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + po->chan.mtu = session->mtu; + + error = ppp_register_net_channel(sock_net(sk), &po->chan); +- if (error) ++ if (error) { ++ mutex_unlock(&ps->sk_lock); + goto end; ++ } + + out_no_ppp: + /* This is how we get the session context from the socket. */ + sk->sk_user_data = session; ++ rcu_assign_pointer(ps->sk, sk); ++ mutex_unlock(&ps->sk_lock); ++ ++ /* Keep the reference we've grabbed on the session: sk doesn't expect ++ * the session to disappear. pppol2tp_session_destruct() is responsible ++ * for dropping it. ++ */ ++ drop_refcnt = false; ++ + sk->sk_state = PPPOX_CONNECTED; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n", + session->name); + + end: +@@ -795,25 +851,19 @@ end: + + #ifdef CONFIG_L2TP_V3 + +-/* Called when creating sessions via the netlink interface. +- */ +-static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) ++/* Called when creating sessions via the netlink interface. */ ++static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, ++ u32 session_id, u32 peer_session_id, ++ struct l2tp_session_cfg *cfg) + { + int error; +- struct l2tp_tunnel *tunnel; + struct l2tp_session *session; +- struct pppol2tp_session *ps; +- +- tunnel = l2tp_tunnel_find(net, tunnel_id); +- +- /* Error if we can't find the tunnel */ +- error = -ENOENT; +- if (tunnel == NULL) +- goto out; + + /* Error if tunnel socket is not prepped */ +- if (tunnel->sock == NULL) +- goto out; ++ if (!tunnel->sock) { ++ error = -ENOENT; ++ goto err; ++ } + + /* Default MTU values. 
*/ + if (cfg->mtu == 0) +@@ -827,18 +877,20 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i + peer_session_id, cfg); + if (IS_ERR(session)) { + error = PTR_ERR(session); +- goto out; ++ goto err; + } + +- ps = l2tp_session_priv(session); +- ps->tunnel_sock = tunnel->sock; ++ pppol2tp_session_init(session); + +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", +- session->name); ++ error = l2tp_session_register(session, tunnel); ++ if (error < 0) ++ goto err_sess; + +- error = 0; ++ return 0; + +-out: ++err_sess: ++ kfree(session); ++err: + return error; + } + +@@ -995,16 +1047,14 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + struct l2tp_tunnel *tunnel = session->tunnel; + struct pppol2tp_ioc_stats stats; + +- l2tp_dbg(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_dbg(session, L2TP_MSG_CONTROL, + "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", + session->name, cmd, arg); + +- sk = ps->sock; ++ sk = pppol2tp_session_get_sock(session); + if (!sk) + return -EBADR; + +- sock_hold(sk); +- + switch (cmd) { + case SIOCGIFMTU: + err = -ENXIO; +@@ -1018,7 +1068,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) + break; + +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n", + session->name, session->mtu); + err = 0; + break; +@@ -1034,7 +1084,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + + session->mtu = ifr.ifr_mtu; + +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n", + session->name, session->mtu); + err = 0; + break; +@@ -1048,7 +1098,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + if (put_user(session->mru, (int __user *) arg)) + break; + +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n", + session->name, session->mru); + err = 0; + break; +@@ -1063,7 +1113,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + break; + + session->mru = val; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n", + session->name, session->mru); + err = 0; + break; +@@ -1073,7 +1123,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + if (put_user(ps->flags, (int __user *) arg)) + break; + +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n", + session->name, ps->flags); + err = 0; + break; +@@ -1083,7 +1133,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + if (get_user(val, (int __user *) arg)) + break; + ps->flags = val; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n", + session->name, ps->flags); + err = 0; + break; +@@ -1100,7 +1150,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, + if (copy_to_user((void __user *) arg, &stats, + sizeof(stats))) + break; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", + session->name); + err = 0; + break; +@@ -1128,7 +1178,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, + struct sock *sk; + struct pppol2tp_ioc_stats stats; + +- l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL, ++ 
l2tp_dbg(tunnel, L2TP_MSG_CONTROL, + "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", + tunnel->name, cmd, arg); + +@@ -1171,7 +1221,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, + err = -EFAULT; + break; + } +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", + tunnel->name); + err = 0; + break; +@@ -1261,7 +1311,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk, + switch (optname) { + case PPPOL2TP_SO_DEBUG: + tunnel->debug = val; +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n", + tunnel->name, tunnel->debug); + break; + +@@ -1280,7 +1330,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk, + int optname, int val) + { + int err = 0; +- struct pppol2tp_session *ps = l2tp_session_priv(session); + + switch (optname) { + case PPPOL2TP_SO_RECVSEQ: +@@ -1289,7 +1338,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, + break; + } + session->recv_seq = val ? -1 : 0; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: set recv_seq=%d\n", + session->name, session->recv_seq); + break; +@@ -1301,13 +1350,13 @@ static int pppol2tp_session_setsockopt(struct sock *sk, + } + session->send_seq = val ? -1 : 0; + { +- struct sock *ssk = ps->sock; +- struct pppox_sock *po = pppox_sk(ssk); ++ struct pppox_sock *po = pppox_sk(sk); ++ + po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : + PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; + } + l2tp_session_set_header_len(session, session->tunnel->version); +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: set send_seq=%d\n", + session->name, session->send_seq); + break; +@@ -1318,20 +1367,20 @@ static int pppol2tp_session_setsockopt(struct sock *sk, + break; + } + session->lns_mode = val ? 
-1 : 0; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: set lns_mode=%d\n", + session->name, session->lns_mode); + break; + + case PPPOL2TP_SO_DEBUG: + session->debug = val; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n", + session->name, session->debug); + break; + + case PPPOL2TP_SO_REORDERTO: + session->reorder_timeout = msecs_to_jiffies(val); +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: set reorder_timeout=%d\n", + session->name, session->reorder_timeout); + break; +@@ -1412,7 +1461,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, + switch (optname) { + case PPPOL2TP_SO_DEBUG: + *val = tunnel->debug; +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n", ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n", + tunnel->name, tunnel->debug); + break; + +@@ -1435,31 +1484,31 @@ static int pppol2tp_session_getsockopt(struct sock *sk, + switch (optname) { + case PPPOL2TP_SO_RECVSEQ: + *val = session->recv_seq; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: get recv_seq=%d\n", session->name, *val); + break; + + case PPPOL2TP_SO_SENDSEQ: + *val = session->send_seq; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: get send_seq=%d\n", session->name, *val); + break; + + case PPPOL2TP_SO_LNSMODE: + *val = session->lns_mode; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: get lns_mode=%d\n", session->name, *val); + break; + + case PPPOL2TP_SO_DEBUG: + *val = session->debug; +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n", ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n", + session->name, *val); + break; + + case PPPOL2TP_SO_REORDERTO: + *val = (int) jiffies_to_msecs(session->reorder_timeout); +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, ++ l2tp_info(session, L2TP_MSG_CONTROL, + "%s: get reorder_timeout=%d\n", session->name, *val); + break; + +@@ -1638,8 +1687,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) + { + struct l2tp_session *session = v; + struct l2tp_tunnel *tunnel = session->tunnel; +- struct pppol2tp_session *ps = l2tp_session_priv(session); +- struct pppox_sock *po = pppox_sk(ps->sock); ++ unsigned char state; ++ char user_data_ok; ++ struct sock *sk; + u32 ip = 0; + u16 port = 0; + +@@ -1649,6 +1699,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) + port = ntohs(inet->inet_sport); + } + ++ sk = pppol2tp_session_get_sock(session); ++ if (sk) { ++ state = sk->sk_state; ++ user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N'; ++ } else { ++ state = 0; ++ user_data_ok = 'N'; ++ } ++ + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " + "%04X/%04X %d %c\n", + session->name, ip, port, +@@ -1656,9 +1715,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) + session->session_id, + tunnel->peer_tunnel_id, + session->peer_session_id, +- ps->sock->sk_state, +- (session == ps->sock->sk_user_data) ? +- 'Y' : 'N'); ++ state, user_data_ok); + seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", + session->mtu, session->mru, + session->recv_seq ? 
'R' : '-', +@@ -1675,8 +1732,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) + atomic_long_read(&session->stats.rx_bytes), + atomic_long_read(&session->stats.rx_errors)); + +- if (po) ++ if (sk) { ++ struct pppox_sock *po = pppox_sk(sk); ++ + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); ++ sock_put(sk); ++ } + } + + static int pppol2tp_seq_show(struct seq_file *m, void *v) +diff --git a/net/socket.c b/net/socket.c +index 65afc8ec68d4..88abc72df2a6 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -3321,3 +3321,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) + return sock->ops->shutdown(sock, how); + } + EXPORT_SYMBOL(kernel_sock_shutdown); ++ ++/* This routine returns the IP overhead imposed by a socket i.e. ++ * the length of the underlying IP header, depending on whether ++ * this is an IPv4 or IPv6 socket and the length from IP options turned ++ * on at the socket. Assumes that the caller has a lock on the socket. ++ */ ++u32 kernel_sock_ip_overhead(struct sock *sk) ++{ ++ struct inet_sock *inet; ++ struct ip_options_rcu *opt; ++ u32 overhead = 0; ++ bool owned_by_user; ++#if IS_ENABLED(CONFIG_IPV6) ++ struct ipv6_pinfo *np; ++ struct ipv6_txoptions *optv6 = NULL; ++#endif /* IS_ENABLED(CONFIG_IPV6) */ ++ ++ if (!sk) ++ return overhead; ++ ++ owned_by_user = sock_owned_by_user(sk); ++ switch (sk->sk_family) { ++ case AF_INET: ++ inet = inet_sk(sk); ++ overhead += sizeof(struct iphdr); ++ opt = rcu_dereference_protected(inet->inet_opt, ++ owned_by_user); ++ if (opt) ++ overhead += opt->opt.optlen; ++ return overhead; ++#if IS_ENABLED(CONFIG_IPV6) ++ case AF_INET6: ++ np = inet6_sk(sk); ++ overhead += sizeof(struct ipv6hdr); ++ if (np) ++ optv6 = rcu_dereference_protected(np->opt, ++ owned_by_user); ++ if (optv6) ++ overhead += (optv6->opt_flen + optv6->opt_nflen); ++ return overhead; ++#endif /* IS_ENABLED(CONFIG_IPV6) */ ++ default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */ ++ return overhead; ++ } ++} ++EXPORT_SYMBOL(kernel_sock_ip_overhead); +diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile +index 8b29dc17c73c..2cad963c4fb7 100644 +--- a/scripts/gcc-plugins/Makefile ++++ b/scripts/gcc-plugins/Makefile +@@ -9,6 +9,7 @@ else + HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti + HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb + HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable ++ HOST_EXTRACXXFLAGS += -Wno-format-diag + export HOST_EXTRACXXFLAGS + endif + +diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h +index 08fe09c28bd2..6792915f5174 100644 +--- a/scripts/gcc-plugins/gcc-common.h ++++ b/scripts/gcc-plugins/gcc-common.h +@@ -31,7 +31,9 @@ + #include "ggc.h" + #include "timevar.h" + ++#if BUILDING_GCC_VERSION < 10000 + #include "params.h" ++#endif + + #if BUILDING_GCC_VERSION <= 4009 + #include "pointer-set.h" +@@ -796,6 +798,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l + return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); + } + ++#if BUILDING_GCC_VERSION < 10000 + template <> + template <> + inline bool is_a_helper::test(const_gimple gs) +@@ -809,6 +812,7 @@ inline bool is_a_helper::test(const_gimple gs) + { + return gs->code == GIMPLE_RETURN; + } ++#endif + + static inline gasm *as_a_gasm(gimple stmt) + { +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index 
c783fefa558a..e034dc21421e 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -90,7 +90,7 @@ static struct shash_desc *init_desc(char type) + algo = evm_hash; + } + +- if (*tfm == NULL) { ++ if (IS_ERR_OR_NULL(*tfm)) { + mutex_lock(&mutex); + if (*tfm) + goto out; +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index 44b44d7e0dbc..853a7d2333b3 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -331,8 +331,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf, + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, + "policy_update", "signed policy required", + 1, 0); +- if (ima_appraise & IMA_APPRAISE_ENFORCE) +- result = -EACCES; ++ result = -EACCES; + } else { + result = ima_parse_add_rule(data); + } +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index f09ae7efc695..f0052c06d065 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -456,6 +456,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, + + no_delta_check: + if (runtime->status->hw_ptr == new_hw_ptr) { ++ runtime->hw_ptr_jiffies = curr_jiffies; + update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); + return 0; + } diff --git a/patch/kernel/odroidc4-legacy/patch-4.9.225-226.patch b/patch/kernel/odroidc4-legacy/patch-4.9.225-226.patch new file mode 100644 index 000000000..0e753fd78 --- /dev/null +++ b/patch/kernel/odroidc4-legacy/patch-4.9.225-226.patch @@ -0,0 +1,1518 @@ +diff --git a/Makefile b/Makefile +index d17a2ad3cc4d..b0e1162fddfa 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 225 ++SUBLEVEL = 226 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts +index 78bfc1a307d6..ebc6e10f8624 100644 +--- a/arch/arm/boot/dts/imx6q-b450v3.dts ++++ b/arch/arm/boot/dts/imx6q-b450v3.dts +@@ -65,13 +65,6 @@ + }; + }; + +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, +- <&clks IMX6QDL_CLK_PLL3_USB_OTG>; +-}; +- + &ldb { + status = "okay"; + +diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts +index d85388725426..681aa612e07f 100644 +--- a/arch/arm/boot/dts/imx6q-b650v3.dts ++++ b/arch/arm/boot/dts/imx6q-b650v3.dts +@@ -65,13 +65,6 @@ + }; + }; + +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, +- <&clks IMX6QDL_CLK_PLL3_USB_OTG>; +-}; +- + &ldb { + status = "okay"; + +diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts +index 167f7446722a..8596df4078e9 100644 +--- a/arch/arm/boot/dts/imx6q-b850v3.dts ++++ b/arch/arm/boot/dts/imx6q-b850v3.dts +@@ -53,17 +53,6 @@ + }; + }; + +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>, +- <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, +- <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, +- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, +- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, +- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>; +-}; +- + &ldb { + fsl,dual-channel; + status = "okay"; +diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi +index e4a415fd899b..cee0e19f180f 100644 +--- 
a/arch/arm/boot/dts/imx6q-bx50v3.dtsi ++++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi +@@ -92,6 +92,56 @@ + mux-int-port = <1>; + mux-ext-port = <4>; + }; ++ ++ aliases { ++ mdio-gpio0 = &mdio0; ++ }; ++ ++ mdio0: mdio-gpio { ++ compatible = "virtual,mdio-gpio"; ++ gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */ ++ <&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */ ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ switch@0 { ++ compatible = "marvell,mv88e6085"; /* 88e6240*/ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0>; ++ ++ switch_ports: ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ switchphy0: switchphy@0 { ++ reg = <0>; ++ }; ++ ++ switchphy1: switchphy@1 { ++ reg = <1>; ++ }; ++ ++ switchphy2: switchphy@2 { ++ reg = <2>; ++ }; ++ ++ switchphy3: switchphy@3 { ++ reg = <3>; ++ }; ++ ++ switchphy4: switchphy@4 { ++ reg = <4>; ++ }; ++ }; ++ }; ++ }; + }; + + &ecspi5 { +@@ -299,3 +349,30 @@ + tcxo-clock-frequency = <26000000>; + }; + }; ++ ++&pcie { ++ /* Synopsys, Inc. Device */ ++ pci_root: root@0,0 { ++ compatible = "pci16c3,abcd"; ++ reg = <0x00000000 0 0 0 0>; ++ ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ }; ++}; ++ ++&clks { ++ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, ++ <&clks IMX6QDL_CLK_LDB_DI1_SEL>, ++ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>; ++ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, ++ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; ++}; +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c +index b9db8e529e4d..dbbe3932f833 100644 +--- a/arch/parisc/mm/init.c ++++ b/arch/parisc/mm/init.c +@@ -604,7 +604,7 @@ void __init mem_init(void) + > BITS_PER_LONG); + + high_memory = __va((max_pfn << PAGE_SHIFT)); +- set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); ++ set_max_mapnr(max_low_pfn); + free_all_bootmem(); + + #ifdef CONFIG_PA11 +diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h +index fe884e18fa6e..c7854a098b6b 100644 +--- a/arch/x86/include/asm/dma.h ++++ b/arch/x86/include/asm/dma.h +@@ -73,7 +73,7 @@ + #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) + + /* 4GB broken PCI/AGP hardware bus master zone */ +-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) ++#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + + #ifdef CONFIG_X86_32 + /* The maximum address that we can perform a DMA transfer to on this platform */ +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c +index 05d3241ad20b..9d763557a105 100644 +--- a/drivers/gpio/gpio-tegra.c ++++ b/drivers/gpio/gpio-tegra.c +@@ -341,6 +341,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) + struct tegra_gpio_info *tgi = bank->tgi; + int gpio = d->hwirq; + ++ tegra_gpio_irq_mask(d); + gpiochip_unlock_as_irq(&tgi->gc, gpio); + } + +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c +index 8ce0f6eef89e..b9d653afff8b 100644 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c +@@ -756,7 +756,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping linkcontrol sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail; ++ 
goto bail_link; + } + kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); + +@@ -766,7 +766,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping sl2vl sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_link; ++ goto bail_sl; + } + kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); + +@@ -776,7 +776,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping diag_counters sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_sl; ++ goto bail_diagc; + } + kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); + +@@ -789,7 +789,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping Congestion Control sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_diagc; ++ goto bail_cc; + } + + kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); +@@ -871,6 +871,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) + &cc_table_bin_attr); + kobject_put(&ppd->pport_cc_kobj); + } ++ kobject_put(&ppd->diagc_kobj); + kobject_put(&ppd->sl2vl_kobj); + kobject_put(&ppd->pport_kobj); + } +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index e9ae3d500a55..700f018df668 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -342,20 +342,6 @@ static int evdev_fasync(int fd, struct file *file, int on) + return fasync_helper(fd, file, on, &client->fasync); + } + +-static int evdev_flush(struct file *file, fl_owner_t id) +-{ +- struct evdev_client *client = file->private_data; +- struct evdev *evdev = client->evdev; +- +- mutex_lock(&evdev->mutex); +- +- if (evdev->exist && !client->revoked) +- input_flush_device(&evdev->handle, file); +- +- mutex_unlock(&evdev->mutex); +- return 0; +-} +- + static void evdev_free(struct device *dev) + { + struct evdev *evdev = container_of(dev, struct evdev, dev); +@@ -469,6 +455,10 @@ static int evdev_release(struct inode *inode, struct file *file) + unsigned int i; + + mutex_lock(&evdev->mutex); ++ ++ if (evdev->exist && !client->revoked) ++ input_flush_device(&evdev->handle, file); ++ + evdev_ungrab(evdev, client); + mutex_unlock(&evdev->mutex); + +@@ -1331,7 +1321,6 @@ static const struct file_operations evdev_fops = { + .compat_ioctl = evdev_ioctl_compat, + #endif + .fasync = evdev_fasync, +- .flush = evdev_flush, + .llseek = no_llseek, + }; + +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 26476a64e663..54a6691d7d87 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -475,6 +475,16 @@ static const u8 xboxone_fw2015_init[] = { + 0x05, 0x20, 0x00, 0x01, 0x00 + }; + ++/* ++ * This packet is required for Xbox One S (0x045e:0x02ea) ++ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to ++ * initialize the controller that was previously used in ++ * Bluetooth mode. 
++ */ ++static const u8 xboxone_s_init[] = { ++ 0x05, 0x20, 0x00, 0x0f, 0x06 ++}; ++ + /* + * This packet is required for the Titanfall 2 Xbox One pads + * (0x0e6f:0x0165) to finish initialization and for Hori pads +@@ -533,6 +543,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { + XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), + XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), ++ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), ++ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c +index 65038dcc7613..677edbf870a7 100644 +--- a/drivers/input/rmi4/rmi_driver.c ++++ b/drivers/input/rmi4/rmi_driver.c +@@ -991,7 +991,8 @@ static int rmi_driver_probe(struct device *dev) + if (data->input) { + rmi_driver_set_input_name(rmi_dev, data->input); + if (!rmi_dev->xport->input) { +- if (input_register_device(data->input)) { ++ retval = input_register_device(data->input); ++ if (retval) { + dev_err(dev, "%s: Failed to register input device.\n", + __func__); + goto err_destroy_functions; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index a4e76084a2af..fd1e79013cf8 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -738,6 +738,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), + }, + }, ++ { ++ /* Lenovo ThinkPad Twist S230u */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c +index 2c41107240de..499402a975b3 100644 +--- a/drivers/input/touchscreen/usbtouchscreen.c ++++ b/drivers/input/touchscreen/usbtouchscreen.c +@@ -197,6 +197,7 @@ static const struct usb_device_id usbtouch_devices[] = { + #endif + + #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH ++ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index dbcc13efaf3c..d609e14bb904 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -195,7 +195,7 @@ struct iommu_group *iommu_group_alloc(void) + NULL, "%d", group->id); + if (ret) { + ida_simple_remove(&iommu_group_ida, group->id); +- kfree(group); ++ kobject_put(&group->kobj); + return ERR_PTR(ret); + } + +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c +index 641a532b67cb..3f756fa2f603 100644 +--- a/drivers/net/bonding/bond_sysfs_slave.c ++++ b/drivers/net/bonding/bond_sysfs_slave.c +@@ -153,8 +153,10 @@ int bond_sysfs_slave_add(struct slave *slave) + + err = kobject_init_and_add(&slave->kobj, &slave_ktype, + &(slave->dev->dev.kobj), "bonding_slave"); +- if (err) ++ if (err) { ++ kobject_put(&slave->kobj); + return err; ++ } + + for (a = slave_attrs; *a; ++a) { + err = sysfs_create_file(&slave->kobj, &((*a)->attr)); +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +index 551b2a9ebf0f..4a4e86000192 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +@@ -2867,6 +2867,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) + dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); + dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); + } ++ ++ put_device(&pdev->dev); ++ + return 0; + } + EXPORT_SYMBOL(hns_dsaf_roce_reset); +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c +index 9af0887c8a29..fe9dc1b3078c 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.c ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c +@@ -2704,7 +2704,7 @@ void mlx4_opreq_action(struct work_struct *work) + if (err) { + mlx4_err(dev, "Failed to retrieve required operation: %d\n", + err); +- return; ++ goto out; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index bb142a13d9f2..b6113620cb1a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -786,6 +786,7 @@ static void cmd_work_handler(struct work_struct *work) + int alloc_ret; + int cmd_mode; + ++ complete(&ent->handling); + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { +@@ -904,6 +905,12 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + struct mlx5_cmd *cmd = &dev->cmd; + int err; + ++ if (!wait_for_completion_timeout(&ent->handling, timeout) && ++ cancel_work_sync(&ent->work)) { ++ ent->ret = -ECANCELED; ++ goto out_err; ++ } ++ + if (cmd->mode == CMD_MODE_POLLING) { + wait_for_completion(&ent->done); + } else if (!wait_for_completion_timeout(&ent->done, timeout)) { +@@ -911,12 +918,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + } + ++out_err: + err = ent->ret; + + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); ++ } else if (err == -ECANCELED) { ++ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", ++ mlx5_command_str(msg_to_opcode(ent->in)), ++ msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", + err, deliv_status_to_str(ent->status), ent->status); +@@ -951,6 +963,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + + ent->token = token; + ++ init_completion(&ent->handling); + if (!callback) + init_completion(&ent->done); + +@@ -970,6 +983,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; ++ if (err == -ECANCELED) ++ goto out_free; + + ds = ent->ts2 - ent->ts1; + op = MLX5_GET(mbox_in, in->first.data, opcode); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 574311018e6f..f0a6b72497da 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -499,8 +499,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq) + { + struct mlx5e_tx_wqe_info *wi; ++ u32 nbytes = 0; ++ u16 ci, npkts = 0; + struct sk_buff *skb; +- u16 ci; + int i; + + while (sq->cc != sq->pc) { +@@ -521,8 +522,11 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq) + } + + dev_kfree_skb_any(skb); ++ npkts++; ++ nbytes += wi->num_bytes; + sq->cc += wi->num_wqebbs; + } ++ netdev_tx_completed_queue(sq->txq, npkts, nbytes); + } + + static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq) +diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c +index b14f0305aa31..ad661d1979c7 100644 +--- a/drivers/net/ethernet/microchip/encx24j600.c ++++ b/drivers/net/ethernet/microchip/encx24j600.c +@@ -1058,7 +1058,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) + if (unlikely(ret)) { + netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", + ret); +- goto out_free; ++ goto out_stop; + } + + eidled = encx24j600_read_reg(priv, EIDLED); +@@ -1076,6 +1076,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) + + out_unregister: + unregister_netdev(priv->ndev); ++out_stop: ++ kthread_stop(priv->kworker_task); + out_free: + free_netdev(ndev); + +@@ -1088,6 +1090,7 @@ static int encx24j600_spi_remove(struct spi_device *spi) + struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); + + unregister_netdev(priv->ndev); ++ kthread_stop(priv->kworker_task); + + free_netdev(priv->ndev); + +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +index 35c5ac41c0a1..5d2de48b77a0 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +@@ -3610,7 +3610,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) + ahw->diag_cnt = 0; + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) +- goto fail_diag_irq; ++ goto fail_mbx_args; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[0].id; +@@ -3640,6 +3640,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) + + done: + qlcnic_free_mbx_args(&cmd); ++ ++fail_mbx_args: + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); + + fail_diag_irq: 
+diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c +index 062bce9acde6..bfe7b55f9714 100644 +--- a/drivers/net/ethernet/sun/cassini.c ++++ b/drivers/net/ethernet/sun/cassini.c +@@ -4980,7 +4980,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + cas_cacheline_size)) { + dev_err(&pdev->dev, "Could not set PCI cache " + "line size\n"); +- goto err_write_cacheline; ++ goto err_out_free_res; + } + } + #endif +@@ -5151,7 +5151,6 @@ err_out_iounmap: + err_out_free_res: + pci_release_regions(pdev); + +-err_write_cacheline: + /* Try to restore it in case the error occurred after we + * set it. + */ +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c +index a3aaef4c53a3..0d2bcb33697f 100644 +--- a/drivers/s390/scsi/zfcp_fsf.c ++++ b/drivers/s390/scsi/zfcp_fsf.c +@@ -1594,6 +1594,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; + struct zfcp_fsf_req *req; ++ unsigned long req_id = 0; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1616,6 +1617,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); + req->data = wka_port; + ++ req_id = req->req_id; ++ + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + retval = zfcp_fsf_req_send(req); + if (retval) +@@ -1623,7 +1626,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + out: + spin_unlock_irq(&qdio->req_q_lock); + if (!retval) +- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); ++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); + return retval; + } + +@@ -1649,6 +1652,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; + struct zfcp_fsf_req *req; ++ unsigned long req_id = 0; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1671,6 +1675,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + req->data = wka_port; + req->qtcb->header.port_handle = wka_port->handle; + ++ req_id = req->req_id; ++ + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + retval = zfcp_fsf_req_send(req); + if (retval) +@@ -1678,7 +1684,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + out: + spin_unlock_irq(&qdio->req_q_lock); + if (!retval) +- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); ++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); + return retval; + } + +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index f80a88d107d7..d8843657d787 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -1523,10 +1523,12 @@ static int __init sc16is7xx_init(void) + #endif + return ret; + ++#ifdef CONFIG_SERIAL_SC16IS7XX_SPI + err_spi: + #ifdef CONFIG_SERIAL_SC16IS7XX_I2C + i2c_del_driver(&sc16is7xx_i2c_uart_driver); + #endif ++#endif + err_i2c: + uart_unregister_driver(&sc16is7xx_uart); + return ret; +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index b8534d3f8bb0..cb02e9ecd8e7 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -1364,7 +1364,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + + req->buf = dev->rbuf; + req->context = NULL; +- value = -EOPNOTSUPP; + switch (ctrl->bRequest) { + + case USB_REQ_GET_DESCRIPTOR: +@@ -1788,7 +1787,7 @@ static ssize_t + dev_config (struct file *fd, const char 
__user *buf, size_t len, loff_t *ptr) + { + struct dev_data *dev = fd->private_data; +- ssize_t value = len, length = len; ++ ssize_t value, length = len; + unsigned total; + u32 tag; + char *kbuf; +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index e78553d51837..73cd7482c1fa 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -1721,7 +1721,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, + (!regset->active || regset->active(t->task, regset) > 0)) { + int ret; + size_t size = regset->n * regset->size; +- void *data = kmalloc(size, GFP_KERNEL); ++ void *data = kzalloc(size, GFP_KERNEL); + if (unlikely(!data)) + return 0; + ret = regset->get(t->task, regset, +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index 799b59d96fe2..7dba96d5fef1 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -64,9 +64,9 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, + object = container_of(op->op.object, struct cachefiles_object, fscache); + spin_lock(&object->work_lock); + list_add_tail(&monitor->op_link, &op->to_do); ++ fscache_enqueue_retrieval(op); + spin_unlock(&object->work_lock); + +- fscache_enqueue_retrieval(op); + fscache_put_retrieval(op); + return 0; + } +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 09d83275c20b..b2919166855f 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -3293,7 +3293,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) + * than it negotiated since it will refuse the read + * then. + */ +- if ((tcon->ses) && !(tcon->ses->capabilities & ++ if (!(tcon->ses->capabilities & + tcon->ses->server->vals->cap_large_files)) { + current_read_size = min_t(uint, + current_read_size, CIFSMaxBufSize); +diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c +index c2ca9566b764..fb9b1d702351 100644 +--- a/fs/gfs2/quota.c ++++ b/fs/gfs2/quota.c +@@ -1039,8 +1039,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) + u32 x; + int error = 0; + +- if (capable(CAP_SYS_RESOURCE) || +- sdp->sd_args.ar_quota != GFS2_QUOTA_ON) ++ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) + return 0; + + error = gfs2_quota_hold(ip, uid, gid); +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h +index 836f29480be6..e3a6e2404d11 100644 +--- a/fs/gfs2/quota.h ++++ b/fs/gfs2/quota.h +@@ -47,7 +47,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, + int ret; + + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ +- if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) ++ if (capable(CAP_SYS_RESOURCE) || ++ sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + return 0; + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); + if (ret) +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index 5d2add1a6c96..864fcfa1df41 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -51,7 +51,7 @@ + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) + #else +- #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #define cpumask_of_node(node) ((void)(node), cpu_online_mask) + #endif + #endif + #ifndef pcibus_to_node +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 509e99076c57..6094e4a3a0a4 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -656,6 +656,7 @@ struct mlx5_cmd_work_ent { + struct delayed_work cb_timeout_work; + void *context; + int idx; ++ struct completion handling; + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; +diff --git a/include/linux/mm.h b/include/linux/mm.h +index ca6f213fa4f0..7a4c035b187f 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -504,6 +504,11 @@ static inline int is_vmalloc_or_module_addr(const void *x) + + extern void kvfree(const void *addr); + ++/* ++ * Mapcount of compound page as a whole, does not include mapped sub-pages. ++ * ++ * Must be called only for compound pages or any their tail sub-pages. ++ */ + static inline int compound_mapcount(struct page *page) + { + VM_BUG_ON_PAGE(!PageCompound(page), page); +@@ -523,10 +528,16 @@ static inline void page_mapcount_reset(struct page *page) + + int __page_mapcount(struct page *page); + ++/* ++ * Mapcount of 0-order page; when compound sub-page, includes ++ * compound_mapcount(). ++ * ++ * Result is undefined for pages which cannot be mapped into userspace. ++ * For example SLAB or special types of pages. See function page_has_type(). ++ * They use this place in struct page differently. ++ */ + static inline int page_mapcount(struct page *page) + { +- VM_BUG_ON_PAGE(PageSlab(page), page); +- + if (unlikely(PageCompound(page))) + return __page_mapcount(page); + return atomic_read(&page->_mapcount) + 1; +diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h +index 2ab2830316b7..aca42a2e79cf 100644 +--- a/include/linux/netfilter/nf_conntrack_pptp.h ++++ b/include/linux/netfilter/nf_conntrack_pptp.h +@@ -4,7 +4,7 @@ + + #include + +-extern const char *const pptp_msg_name[]; ++const char *pptp_msg_name(u_int16_t msg); + + /* state of the control session */ + enum pptp_ctrlsess_state { +diff --git a/include/net/act_api.h b/include/net/act_api.h +index 82f3c912a5b1..051b90779708 100644 +--- a/include/net/act_api.h ++++ b/include/net/act_api.h +@@ -94,7 +94,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) + { + dtm->install = jiffies_to_clock_t(jiffies - stm->install); + dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); +- dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); ++ dtm->firstuse = stm->firstuse ? 
++ jiffies_to_clock_t(jiffies - stm->firstuse) : 0; + dtm->expires = jiffies_to_clock_t(stm->expires); + } + +diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h +index f888263fd757..f4205f935331 100644 +--- a/include/rdma/ib_addr.h ++++ b/include/rdma/ib_addr.h +@@ -208,11 +208,13 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, + dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); + if (dev) { + ip4 = in_dev_get(dev); +- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) { ++ if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) + ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address, + (struct in6_addr *)gid); ++ ++ if (ip4) + in_dev_put(ip4); +- } ++ + dev_put(dev); + } + } +diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h +index bb2d62037037..80d85053fb06 100644 +--- a/include/uapi/linux/l2tp.h ++++ b/include/uapi/linux/l2tp.h +@@ -9,9 +9,8 @@ + + #include + #include +-#ifndef __KERNEL__ +-#include +-#endif ++#include ++#include + + #define IPPROTO_L2TP 115 + +@@ -31,7 +30,7 @@ struct sockaddr_l2tpip { + __u32 l2tp_conn_id; /* Connection ID of tunnel */ + + /* Pad to size of `struct sockaddr'. */ +- unsigned char __pad[sizeof(struct sockaddr) - ++ unsigned char __pad[__SOCK_SIZE__ - + sizeof(__kernel_sa_family_t) - + sizeof(__be16) - sizeof(struct in_addr) - + sizeof(__u32)]; +diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c +index 37ddb7bda651..ec7c7eda0774 100644 +--- a/kernel/irq/migration.c ++++ b/kernel/irq/migration.c +@@ -7,17 +7,18 @@ + void irq_move_masked_irq(struct irq_data *idata) + { + struct irq_desc *desc = irq_data_to_desc(idata); +- struct irq_chip *chip = desc->irq_data.chip; ++ struct irq_data *data = &desc->irq_data; ++ struct irq_chip *chip = data->chip; + +- if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) ++ if (likely(!irqd_is_setaffinity_pending(data))) + return; + +- irqd_clr_move_pending(&desc->irq_data); ++ irqd_clr_move_pending(data); + + /* + * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. + */ +- if (irqd_is_per_cpu(&desc->irq_data)) { ++ if (irqd_is_per_cpu(data)) { + WARN_ON(1); + return; + } +@@ -42,9 +43,20 @@ void irq_move_masked_irq(struct irq_data *idata) + * For correct operation this depends on the caller + * masking the irqs. + */ +- if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) +- irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); +- ++ if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) { ++ int ret; ++ ++ ret = irq_do_set_affinity(data, desc->pending_mask, false); ++ /* ++ * If the there is a cleanup pending in the underlying ++ * vector management, reschedule the move for the next ++ * interrupt. Leave desc->pending_mask intact. 
++ */ ++ if (ret == -EBUSY) { ++ irqd_set_move_pending(data); ++ return; ++ } ++ } + cpumask_clear(desc->pending_mask); + } + +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index c74a087fcb7d..5d11aeceb7f8 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1499,7 +1499,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + addr)) + return; + +- area = find_vmap_area((unsigned long)addr)->vm; ++ area = find_vm_area(addr); + if (unlikely(!area)) { + WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", + addr); +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c +index de55a3f001dc..02be8ee23271 100644 +--- a/net/ax25/af_ax25.c ++++ b/net/ax25/af_ax25.c +@@ -639,8 +639,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, + break; + + case SO_BINDTODEVICE: +- if (optlen > IFNAMSIZ) +- optlen = IFNAMSIZ; ++ if (optlen > IFNAMSIZ - 1) ++ optlen = IFNAMSIZ - 1; ++ ++ memset(devname, 0, sizeof(devname)); + + if (copy_from_user(devname, optval, optlen)) { + res = -EFAULT; +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c +index d94aaf7c7685..4b5e1a661317 100644 +--- a/net/bridge/netfilter/nft_reject_bridge.c ++++ b/net/bridge/netfilter/nft_reject_bridge.c +@@ -34,6 +34,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, + ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); + eth->h_proto = eth_hdr(oldskb)->h_proto; + skb_pull(nskb, ETH_HLEN); ++ ++ if (skb_vlan_tag_present(oldskb)) { ++ u16 vid = skb_vlan_tag_get(oldskb); ++ ++ __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); ++ } + } + + static int nft_bridge_iphdr_validate(struct sk_buff *skb) +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index 70ccb0716fc5..4fd679b30b19 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -2879,7 +2879,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) + * supported. 
+ */ + req->r_t.target_oloc.pool = m.redirect.oloc.pool; +- req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; ++ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | ++ CEPH_OSD_FLAG_IGNORE_OVERLAY | ++ CEPH_OSD_FLAG_IGNORE_CACHE; + req->r_tid = 0; + __submit_request(req, false); + goto out_unlock_osdc; +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index ead1a32c68f7..e652e376fb30 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -2361,7 +2361,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) + } + + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { +- __dev_notify_flags(dev, old_flags, 0U); ++ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index 8afb67a48409..dc3b36ca9f30 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -50,7 +50,7 @@ static int vti_net_id __read_mostly; + static int vti_tunnel_init(struct net_device *dev); + + static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, +- int encap_type) ++ int encap_type, bool update_skb_dev) + { + struct ip_tunnel *tunnel; + const struct iphdr *iph = ip_hdr(skb); +@@ -65,6 +65,9 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; + ++ if (update_skb_dev) ++ skb->dev = tunnel->dev; ++ + return xfrm_input(skb, nexthdr, spi, encap_type); + } + +@@ -74,25 +77,43 @@ drop: + return 0; + } + +-static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, +- int encap_type) ++static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, ++ int encap_type) + { +- struct ip_tunnel *tunnel; ++ return vti_input(skb, nexthdr, spi, encap_type, false); ++} ++ ++static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev) ++{ ++ XFRM_SPI_SKB_CB(skb)->family = AF_INET; ++ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); ++ ++ return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev); ++} ++ ++static int vti_rcv_proto(struct sk_buff *skb) ++{ ++ return vti_rcv(skb, 0, false); ++} ++ ++static int vti_rcv_tunnel(struct sk_buff *skb) ++{ ++ struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); + const struct iphdr *iph = ip_hdr(skb); +- struct net *net = dev_net(skb->dev); +- struct ip_tunnel_net *itn = net_generic(net, vti_net_id); ++ struct ip_tunnel *tunnel; + + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, + iph->saddr, iph->daddr, 0); + if (tunnel) { ++ struct tnl_ptk_info tpi = { ++ .proto = htons(ETH_P_IP), ++ }; ++ + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + goto drop; +- +- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; +- +- skb->dev = tunnel->dev; +- +- return xfrm_input(skb, nexthdr, spi, encap_type); ++ if (iptunnel_pull_header(skb, 0, tpi.proto, false)) ++ goto drop; ++ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); + } + + return -EINVAL; +@@ -101,22 +122,6 @@ drop: + return 0; + } + +-static int vti_rcv(struct sk_buff *skb) +-{ +- XFRM_SPI_SKB_CB(skb)->family = AF_INET; +- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); +- +- return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); +-} +- +-static int vti_rcv_ipip(struct sk_buff *skb) +-{ +- XFRM_SPI_SKB_CB(skb)->family = AF_INET; +- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); +- +- return vti_input_ipip(skb, ip_hdr(skb)->protocol, 
ip_hdr(skb)->saddr, 0); +-} +- + static int vti_rcv_cb(struct sk_buff *skb, int err) + { + unsigned short family; +@@ -482,31 +487,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev) + } + + static struct xfrm4_protocol vti_esp4_protocol __read_mostly = { +- .handler = vti_rcv, +- .input_handler = vti_input, ++ .handler = vti_rcv_proto, ++ .input_handler = vti_input_proto, + .cb_handler = vti_rcv_cb, + .err_handler = vti4_err, + .priority = 100, + }; + + static struct xfrm4_protocol vti_ah4_protocol __read_mostly = { +- .handler = vti_rcv, +- .input_handler = vti_input, ++ .handler = vti_rcv_proto, ++ .input_handler = vti_input_proto, + .cb_handler = vti_rcv_cb, + .err_handler = vti4_err, + .priority = 100, + }; + + static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { +- .handler = vti_rcv, +- .input_handler = vti_input, ++ .handler = vti_rcv_proto, ++ .input_handler = vti_input_proto, + .cb_handler = vti_rcv_cb, + .err_handler = vti4_err, + .priority = 100, + }; + + static struct xfrm_tunnel ipip_handler __read_mostly = { +- .handler = vti_rcv_ipip, ++ .handler = vti_rcv_tunnel, + .err_handler = vti4_err, + .priority = 0, + }; +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c +index 56d71a004dce..bd23a2d01b6c 100644 +--- a/net/ipv4/ipip.c ++++ b/net/ipv4/ipip.c +@@ -689,7 +689,7 @@ out: + + rtnl_link_failed: + #if IS_ENABLED(CONFIG_MPLS) +- xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); ++ xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); + xfrm_tunnel_mplsip_failed: + + #endif +diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c +index b3ca21b2ba9b..ddbf93e70069 100644 +--- a/net/ipv4/netfilter/nf_nat_pptp.c ++++ b/net/ipv4/netfilter/nf_nat_pptp.c +@@ -156,8 +156,7 @@ pptp_outbound_pkt(struct sk_buff *skb, + break; + default: + pr_debug("unknown outbound packet 0x%04x:%s\n", msg, +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : +- pptp_msg_name[0]); ++ pptp_msg_name(msg)); + /* fall through */ + case PPTP_SET_LINK_INFO: + /* only need to NAT in case PAC is behind NAT box */ +@@ -250,9 +249,7 @@ pptp_inbound_pkt(struct sk_buff *skb, + pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); + break; + default: +- pr_debug("unknown inbound packet %s\n", +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : +- pptp_msg_name[0]); ++ pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); + /* fall through */ + case PPTP_START_SESSION_REQUEST: + case PPTP_START_SESSION_REPLY: +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 81efd2d3998d..c8c51bd2d695 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -477,18 +477,16 @@ u32 ip_idents_reserve(u32 hash, int segs) + atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; + u32 old = ACCESS_ONCE(*p_tstamp); + u32 now = (u32)jiffies; +- u32 new, delta = 0; ++ u32 delta = 0; + + if (old != now && cmpxchg(p_tstamp, old, now) == old) + delta = prandom_u32_max(now - old); + +- /* Do not use atomic_add_return() as it makes UBSAN unhappy */ +- do { +- old = (u32)atomic_read(p_id); +- new = old + delta + segs; +- } while (atomic_cmpxchg(p_id, old, new) != old); +- +- return new - segs; ++ /* If UBSAN reports an error there, please make sure your compiler ++ * supports -fno-strict-overflow before reporting it that was a bug ++ * in UBSAN, and it has been fixed in GCC-8. 
++ */ ++ return atomic_add_return(segs + delta, p_id) - segs; + } + EXPORT_SYMBOL(ip_idents_reserve); + +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c +index f7eaa1051b5b..2fbd100b9e73 100644 +--- a/net/mac80211/mesh_hwmp.c ++++ b/net/mac80211/mesh_hwmp.c +@@ -1082,7 +1082,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) + mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, + target_flags, mpath->dst, mpath->sn, da, 0, + ttl, lifetime, 0, ifmsh->preq_id++, sdata); ++ ++ spin_lock_bh(&mpath->state_lock); ++ if (mpath->flags & MESH_PATH_DELETED) { ++ spin_unlock_bh(&mpath->state_lock); ++ goto enddiscovery; ++ } + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); ++ spin_unlock_bh(&mpath->state_lock); + + enddiscovery: + rcu_read_unlock(); +diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c +index e82157285d34..f13d40402f7e 100644 +--- a/net/netfilter/ipset/ip_set_list_set.c ++++ b/net/netfilter/ipset/ip_set_list_set.c +@@ -61,7 +61,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, + /* Don't lookup sub-counters at all */ + opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; + if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) +- opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; ++ opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; + list_for_each_entry_rcu(e, &map->members, list) { + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(e, set))) +diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c +index f60a4755d71e..1b2fa9d8575f 100644 +--- a/net/netfilter/nf_conntrack_pptp.c ++++ b/net/netfilter/nf_conntrack_pptp.c +@@ -71,24 +71,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); + + #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) + /* PptpControlMessageType names */ +-const char *const pptp_msg_name[] = { +- "UNKNOWN_MESSAGE", +- "START_SESSION_REQUEST", +- "START_SESSION_REPLY", +- "STOP_SESSION_REQUEST", +- "STOP_SESSION_REPLY", +- "ECHO_REQUEST", +- "ECHO_REPLY", +- "OUT_CALL_REQUEST", +- "OUT_CALL_REPLY", +- "IN_CALL_REQUEST", +- "IN_CALL_REPLY", +- "IN_CALL_CONNECT", +- "CALL_CLEAR_REQUEST", +- "CALL_DISCONNECT_NOTIFY", +- "WAN_ERROR_NOTIFY", +- "SET_LINK_INFO" ++static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { ++ [0] = "UNKNOWN_MESSAGE", ++ [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", ++ [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", ++ [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", ++ [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", ++ [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", ++ [PPTP_ECHO_REPLY] = "ECHO_REPLY", ++ [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", ++ [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", ++ [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", ++ [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", ++ [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", ++ [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", ++ [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", ++ [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", ++ [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" + }; ++ ++const char *pptp_msg_name(u_int16_t msg) ++{ ++ if (msg > PPTP_MSG_MAX) ++ return pptp_msg_name_array[0]; ++ ++ return pptp_msg_name_array[msg]; ++} + EXPORT_SYMBOL(pptp_msg_name); + #endif + +@@ -277,7 +285,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; + + msg = ntohs(ctlh->messageType); +- pr_debug("inbound control message %s\n", pptp_msg_name[msg]); ++ 
pr_debug("inbound control message %s\n", pptp_msg_name(msg)); + + switch (msg) { + case PPTP_START_SESSION_REPLY: +@@ -312,7 +320,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + pcid = pptpReq->ocack.peersCallID; + if (info->pns_call_id != pcid) + goto invalid; +- pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], ++ pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), + ntohs(cid), ntohs(pcid)); + + if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { +@@ -329,7 +337,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + goto invalid; + + cid = pptpReq->icreq.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->cstate = PPTP_CALL_IN_REQ; + info->pac_call_id = cid; + break; +@@ -348,7 +356,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + if (info->pns_call_id != pcid) + goto invalid; + +- pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); ++ pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); + info->cstate = PPTP_CALL_IN_CONF; + + /* we expect a GRE connection from PAC to PNS */ +@@ -358,7 +366,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + case PPTP_CALL_DISCONNECT_NOTIFY: + /* server confirms disconnect */ + cid = pptpReq->disc.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->cstate = PPTP_CALL_NONE; + + /* untrack this call id, unexpect GRE packets */ +@@ -385,7 +393,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + invalid: + pr_debug("invalid %s: type=%d cid=%u pcid=%u " + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], ++ pptp_msg_name(msg), + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, + ntohs(info->pns_call_id), ntohs(info->pac_call_id)); + return NF_ACCEPT; +@@ -405,7 +413,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; + + msg = ntohs(ctlh->messageType); +- pr_debug("outbound control message %s\n", pptp_msg_name[msg]); ++ pr_debug("outbound control message %s\n", pptp_msg_name(msg)); + + switch (msg) { + case PPTP_START_SESSION_REQUEST: +@@ -427,7 +435,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + info->cstate = PPTP_CALL_OUT_REQ; + /* track PNS call id */ + cid = pptpReq->ocreq.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->pns_call_id = cid; + break; + +@@ -441,7 +449,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + pcid = pptpReq->icack.peersCallID; + if (info->pac_call_id != pcid) + goto invalid; +- pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], ++ pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), + ntohs(cid), ntohs(pcid)); + + if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { +@@ -481,7 +489,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + invalid: + pr_debug("invalid %s: type=%d cid=%u pcid=%u " + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", +- msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], ++ pptp_msg_name(msg), + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, + ntohs(info->pns_call_id), ntohs(info->pac_call_id)); + return NF_ACCEPT; +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c +index 41547c6e496a..a8253079902f 100644 +--- a/net/qrtr/qrtr.c ++++ b/net/qrtr/qrtr.c +@@ -571,7 +571,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb) + } + mutex_unlock(&qrtr_node_lock); + +- qrtr_local_enqueue(node, skb); ++ qrtr_local_enqueue(NULL, skb); + + return 0; + } +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 1a3c75347f48..146b568962e0 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1793,12 +1793,13 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, + /* Update the content of current association. */ + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); +- if (sctp_state(asoc, SHUTDOWN_PENDING) && ++ if ((sctp_state(asoc, SHUTDOWN_PENDING) || ++ sctp_state(asoc, SHUTDOWN_SENT)) && + (sctp_sstate(asoc->base.sk, CLOSING) || + sock_flag(asoc->base.sk, SOCK_DEAD))) { +- /* if were currently in SHUTDOWN_PENDING, but the socket +- * has been closed by user, don't transition to ESTABLISHED. +- * Instead trigger SHUTDOWN bundled with COOKIE_ACK. ++ /* If the socket has been closed by user, don't ++ * transition to ESTABLISHED. Instead trigger SHUTDOWN ++ * bundled with COOKIE_ACK. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index 6e3f0254d8a1..1e87639f2c27 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -302,7 +302,7 @@ resume: + dev_put(skb->dev); + + spin_lock(&x->lock); +- if (nexthdr <= 0) { ++ if (nexthdr < 0) { + if (nexthdr == -EBADMSG) { + xfrm_audit_state_icvfail(x, skb, + x->type->proto); +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c +index 637387bbaaea..2c4aa7b5ccd5 100644 +--- a/net/xfrm/xfrm_output.c ++++ b/net/xfrm/xfrm_output.c +@@ -240,7 +240,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) + + if (skb->protocol == htons(ETH_P_IP)) + proto = AF_INET; +- else if (skb->protocol == htons(ETH_P_IPV6)) ++ else if (skb->protocol == htons(ETH_P_IPV6) && ++ skb->sk->sk_family == AF_INET6) + proto = AF_INET6; + else + return; +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 69d061d4ed4f..b00ed36b9aac 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -757,12 +757,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, + static bool xfrm_policy_mark_match(struct xfrm_policy *policy, + struct xfrm_policy *pol) + { +- u32 mark = policy->mark.v & policy->mark.m; +- +- if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) +- return true; +- +- if ((mark & pol->mark.m) == pol->mark.v && ++ if (policy->mark.v == pol->mark.v && + policy->priority == pol->priority) + return true; + +diff --git a/security/commoncap.c b/security/commoncap.c +index 8df676fbd393..b86aca8d6798 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -497,6 +497,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) + int ret; + kuid_t root_uid; + ++ new->cap_ambient = old->cap_ambient; + if (WARN_ON(!cap_ambient_invariant_ok(old))) + return -EPERM; + +diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c +index 36d2416f90d9..96b737adf4d2 
100644 +--- a/sound/core/hwdep.c ++++ b/sound/core/hwdep.c +@@ -228,14 +228,14 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw, + if (copy_from_user(&info, _info, sizeof(info))) + return -EFAULT; + /* check whether the dsp was already loaded */ +- if (hw->dsp_loaded & (1 << info.index)) ++ if (hw->dsp_loaded & (1u << info.index)) + return -EBUSY; + if (!access_ok(VERIFY_READ, info.image, info.length)) + return -EFAULT; + err = hw->ops.dsp_load(hw, &info); + if (err < 0) + return err; +- hw->dsp_loaded |= (1 << info.index); ++ hw->dsp_loaded |= (1u << info.index); + return 0; + } + +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index e2f62362a0b0..024864ce3f76 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -980,6 +980,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + cval->res = 384; + } + break; ++ case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ ++ if ((strstr(kctl->id.name, "Playback Volume") != NULL) || ++ strstr(kctl->id.name, "Capture Volume") != NULL) { ++ cval->min >>= 8; ++ cval->max = 0; ++ cval->res = 1; ++ } ++ break; + } + } + diff --git a/patch/kernel/odroidc4-legacy/patch-4.9.226-227.patch b/patch/kernel/odroidc4-legacy/patch-4.9.226-227.patch new file mode 100644 index 000000000..9ba6a9fb3 --- /dev/null +++ b/patch/kernel/odroidc4-legacy/patch-4.9.226-227.patch @@ -0,0 +1,1642 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index b41046b5713b..a5225df4a070 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -358,6 +358,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds ++ /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + Date: January 2018 +diff --git a/Documentation/hw-vuln/index.rst b/Documentation/hw-vuln/index.rst +index 24f53c501366..b5fbc6ae9d5f 100644 +--- a/Documentation/hw-vuln/index.rst ++++ b/Documentation/hw-vuln/index.rst +@@ -12,4 +12,5 @@ are configurable at compile, boot or run time. + l1tf + mds + tsx_async_abort +- multihit.rst ++ multihit ++ special-register-buffer-data-sampling +diff --git a/Documentation/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/hw-vuln/special-register-buffer-data-sampling.rst +new file mode 100644 +index 000000000000..47b1b3afac99 +--- /dev/null ++++ b/Documentation/hw-vuln/special-register-buffer-data-sampling.rst +@@ -0,0 +1,149 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++SRBDS - Special Register Buffer Data Sampling ++============================================= ++ ++SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to ++infer values returned from special register accesses. Special register ++accesses are accesses to off core registers. According to Intel's evaluation, ++the special register reads that have a security expectation of privacy are ++RDRAND, RDSEED and SGX EGETKEY. ++ ++When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved ++to the core through the special register mechanism that is susceptible ++to MDS attacks. ++ ++Affected processors ++-------------------- ++Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may ++be affected. 
++ ++A processor is affected by SRBDS if its Family_Model and stepping is ++in the following list, with the exception of the listed processors ++exporting MDS_NO while Intel TSX is available yet not enabled. The ++latter class of processors are only affected when Intel TSX is enabled ++by software using TSX_CTRL_MSR otherwise they are not affected. ++ ++ ============= ============ ======== ++ common name Family_Model Stepping ++ ============= ============ ======== ++ IvyBridge 06_3AH All ++ ++ Haswell 06_3CH All ++ Haswell_L 06_45H All ++ Haswell_G 06_46H All ++ ++ Broadwell_G 06_47H All ++ Broadwell 06_3DH All ++ ++ Skylake_L 06_4EH All ++ Skylake 06_5EH All ++ ++ Kabylake_L 06_8EH <= 0xC ++ Kabylake 06_9EH <= 0xD ++ ============= ============ ======== ++ ++Related CVEs ++------------ ++ ++The following CVE entry is related to this SRBDS issue: ++ ++ ============== ===== ===================================== ++ CVE-2020-0543 SRBDS Special Register Buffer Data Sampling ++ ============== ===== ===================================== ++ ++Attack scenarios ++---------------- ++An unprivileged user can extract values returned from RDRAND and RDSEED ++executed on another core or sibling thread using MDS techniques. ++ ++ ++Mitigation mechanism ++------------------- ++Intel will release microcode updates that modify the RDRAND, RDSEED, and ++EGETKEY instructions to overwrite secret special register data in the shared ++staging buffer before the secret data can be accessed by another logical ++processor. ++ ++During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core ++accesses from other logical processors will be delayed until the special ++register read is complete and the secret data in the shared staging buffer is ++overwritten. ++ ++This has three effects on performance: ++ ++#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. ++ ++#. Executing RDRAND at the same time on multiple logical processors will be ++ serialized, resulting in an overall reduction in the maximum RDRAND ++ bandwidth. ++ ++#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other ++ logical processors that miss their core caches, with an impact similar to ++ legacy locked cache-line-split accesses. ++ ++The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable ++the mitigation for RDRAND and RDSEED instructions executed outside of Intel ++Software Guard Extensions (Intel SGX) enclaves. On logical processors that ++disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not ++take longer to execute and do not impact performance of sibling logical ++processors memory accesses. The opt-out mechanism does not affect Intel SGX ++enclaves (including execution of RDRAND or RDSEED inside an enclave, as well ++as EGETKEY execution). ++ ++IA32_MCU_OPT_CTRL MSR Definition ++-------------------------------- ++Along with the mitigation for this issue, Intel added a new thread-scope ++IA32_MCU_OPT_CTRL MSR, (address 0x123). The presence of this MSR and ++RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL = ++9]==1. This MSR is introduced through the microcode update. ++ ++Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor ++disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX ++enclave on that logical processor. Opting out of the mitigation for a ++particular logical processor does not affect the RDRAND and RDSEED mitigations ++for other logical processors. 
++ ++Note that inside of an Intel SGX enclave, the mitigation is applied regardless ++of the value of RNGDS_MITG_DS. ++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++The kernel command line allows control over the SRBDS mitigation at boot time ++with the option "srbds=". The option for this is: ++ ++ ============= ============================================================= ++ off This option disables SRBDS mitigation for RDRAND and RDSEED on ++ affected platforms. ++ ============= ============================================================= ++ ++SRBDS System Information ++----------------------- ++The Linux kernel provides vulnerability status information through sysfs. For ++SRBDS this can be accessed by the following sysfs file: ++/sys/devices/system/cpu/vulnerabilities/srbds ++ ++The possible values contained in this file are: ++ ++ ============================== ============================================= ++ Not affected Processor not vulnerable ++ Vulnerable Processor vulnerable and mitigation disabled ++ Vulnerable: No microcode Processor vulnerable and microcode is missing ++ mitigation ++ Mitigation: Microcode Processor is vulnerable and mitigation is in ++ effect. ++ Mitigation: TSX disabled Processor is only vulnerable when TSX is ++ enabled while this system was booted with TSX ++ disabled. ++ Unknown: Dependent on ++ hypervisor status Running on virtual guest processor that is ++ affected but with no way to know if host ++ processor is mitigated or vulnerable. ++ ============================== ============================================= ++ ++SRBDS Default mitigation ++------------------------ ++This new microcode serializes processor access during execution of RDRAND, ++RDSEED ensures that the shared buffer is overwritten before it is released for ++reuse. Use the "srbds=off" kernel command line to disable the mitigation for ++RDRAND and RDSEED. +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index e05d65d6fcb6..40602517ca52 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -4262,6 +4262,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + spia_pedr= + spia_peddr= + ++ srbds= [X86,INTEL] ++ Control the Special Register Buffer Data Sampling ++ (SRBDS) mitigation. ++ ++ Certain CPUs are vulnerable to an MDS-like ++ exploit which can leak bits from the random ++ number generator. ++ ++ By default, this issue is mitigated by ++ microcode. However, the microcode fix can cause ++ the RDRAND and RDSEED instructions to become ++ much slower. Among other effects, this will ++ result in reduced throughput from /dev/urandom. 
++ ++ The microcode mitigation can be disabled with ++ the following option: ++ ++ off: Disable mitigation and remove ++ performance impact to RDRAND and RDSEED ++ + ssbd= [ARM64,HW] + Speculative Store Bypass Disable control + +diff --git a/Makefile b/Makefile +index b0e1162fddfa..6c3c6e193621 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 226 ++SUBLEVEL = 227 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c +index 9f96120eee6e..82464fae7772 100644 +--- a/arch/arc/kernel/setup.c ++++ b/arch/arc/kernel/setup.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -333,12 +334,12 @@ static void arc_chk_core_config(void) + if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) + panic("Linux built with incorrect DCCM Base address\n"); + +- if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) ++ if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz) + panic("Linux built with incorrect DCCM Size\n"); + #endif + + #ifdef CONFIG_ARC_HAS_ICCM +- if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) ++ if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz) + panic("Linux built with incorrect ICCM Size\n"); + #endif + +diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S +index 802a4ded9a62..e9df35249f9f 100644 +--- a/arch/s390/kernel/mcount.S ++++ b/arch/s390/kernel/mcount.S +@@ -39,6 +39,7 @@ EXPORT_SYMBOL(_mcount) + ENTRY(ftrace_caller) + .globl ftrace_regs_caller + .set ftrace_regs_caller,ftrace_caller ++ stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller + lgr %r1,%r15 + #ifndef CC_USING_HOTPATCH + aghi %r0,MCOUNT_RETURN_FIXUP +diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h +index ff501e511d91..b9473858c6b6 100644 +--- a/arch/x86/include/asm/cpu_device_id.h ++++ b/arch/x86/include/asm/cpu_device_id.h +@@ -8,6 +8,33 @@ + + #include + ++#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins) ++ ++/** ++ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching ++ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY ++ * The name is expanded to X86_VENDOR_@_vendor ++ * @_family: The family number or X86_FAMILY_ANY ++ * @_model: The model number, model constant or X86_MODEL_ANY ++ * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY ++ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY ++ * @_data: Driver specific data or NULL. The internal storage ++ * format is unsigned long. The supplied value, pointer ++ * etc. is casted to unsigned long internally. ++ * ++ * Backport version to keep the SRBDS pile consistant. No shorter variants ++ * required for this. 
++ */ ++#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ ++ _steppings, _feature, _data) { \ ++ .vendor = X86_VENDOR_##_vendor, \ ++ .family = _family, \ ++ .model = _model, \ ++ .steppings = _steppings, \ ++ .feature = _feature, \ ++ .driver_data = (unsigned long) _data \ ++} ++ + extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); + + #endif +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index fb457ba8ccc6..2cd5d12a842c 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -316,6 +316,7 @@ + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ + #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ + #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ +@@ -346,19 +347,20 @@ + */ + #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ + #endif +-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ +-#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ +-#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ +-#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ +-#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ +-#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ +-#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ +-#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ ++#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ ++#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ ++#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ ++#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ ++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ ++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ ++#define X86_BUG_MDS X86_BUG(19) /* 
CPU is affected by Microarchitectural data sampling */ ++#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ ++#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ ++#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ ++#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ ++#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 8d162e0f2881..b12b0a50ad1f 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -103,6 +103,10 @@ + #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ + #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ + ++/* SRBDS support */ ++#define MSR_IA32_MCU_OPT_CTRL 0x00000123 ++#define RNGDS_MITG_DIS BIT(0) ++ + #define MSR_IA32_SYSENTER_CS 0x00000174 + #define MSR_IA32_SYSENTER_ESP 0x00000175 + #define MSR_IA32_SYSENTER_EIP 0x00000176 +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 5736306bdaab..e638e3bc3cb8 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -203,6 +203,7 @@ static inline int pmd_large(pmd_t pte) + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE ++/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */ + static inline int pmd_trans_huge(pmd_t pmd) + { + return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 24307d5bb4b8..5ef0a2b34261 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -40,6 +40,7 @@ static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); + static void __init mds_print_mitigation(void); + static void __init taa_select_mitigation(void); ++static void __init srbds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ + u64 x86_spec_ctrl_base; +@@ -107,6 +108,7 @@ void __init check_bugs(void) + l1tf_select_mitigation(); + mds_select_mitigation(); + taa_select_mitigation(); ++ srbds_select_mitigation(); + + /* + * As MDS and TAA mitigations are inter-related, print MDS +@@ -389,6 +391,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str) + } + early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + ++#undef pr_fmt ++#define pr_fmt(fmt) "SRBDS: " fmt ++ ++enum srbds_mitigations { ++ SRBDS_MITIGATION_OFF, ++ SRBDS_MITIGATION_UCODE_NEEDED, ++ SRBDS_MITIGATION_FULL, ++ SRBDS_MITIGATION_TSX_OFF, ++ SRBDS_MITIGATION_HYPERVISOR, ++}; ++ ++static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; ++ ++static const char * const srbds_strings[] = { ++ [SRBDS_MITIGATION_OFF] = "Vulnerable", ++ [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", ++ [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", ++ [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", ++ [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", ++}; ++ ++static bool srbds_off; ++ ++void update_srbds_msr(void) ++{ ++ u64 mcu_ctrl; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return; ++ ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) ++ return; ++ ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ ++ switch (srbds_mitigation) { ++ case SRBDS_MITIGATION_OFF: ++ case SRBDS_MITIGATION_TSX_OFF: ++ mcu_ctrl |= RNGDS_MITG_DIS; ++ break; ++ case SRBDS_MITIGATION_FULL: ++ mcu_ctrl &= ~RNGDS_MITG_DIS; ++ break; ++ default: ++ break; ++ } ++ ++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++} ++ ++static void __init srbds_select_mitigation(void) ++{ ++ u64 ia32_cap; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return; ++ ++ /* ++ * Check to see if this is one of the MDS_NO systems supporting ++ * TSX that are only exposed to SRBDS when TSX is enabled. 
++ */ ++ ia32_cap = x86_read_arch_cap_msr(); ++ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) ++ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; ++ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; ++ else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) ++ srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; ++ else if (cpu_mitigations_off() || srbds_off) ++ srbds_mitigation = SRBDS_MITIGATION_OFF; ++ ++ update_srbds_msr(); ++ pr_info("%s\n", srbds_strings[srbds_mitigation]); ++} ++ ++static int __init srbds_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return 0; ++ ++ srbds_off = !strcmp(str, "off"); ++ return 0; ++} ++early_param("srbds", srbds_parse_cmdline); ++ + #undef pr_fmt + #define pr_fmt(fmt) "Spectre V1 : " fmt + +@@ -1501,6 +1594,11 @@ static char *ibpb_state(void) + return ""; + } + ++static ssize_t srbds_show_state(char *buf) ++{ ++ return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -1542,6 +1640,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_ITLB_MULTIHIT: + return itlb_multihit_show_state(buf); + ++ case X86_BUG_SRBDS: ++ return srbds_show_state(buf); ++ + default: + break; + } +@@ -1588,4 +1689,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr + { + return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); + } ++ ++ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index f490a4fab2f7..b16b6176738b 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -956,9 +956,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + {} + }; + +-static bool __init cpu_matches(unsigned long which) ++#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ ++ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ ++ INTEL_FAM6_##model, steppings, \ ++ X86_FEATURE_ANY, issues) ++ ++#define SRBDS BIT(0) ++ ++static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { ++ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC), SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD), SRBDS), ++ {} ++}; ++ ++static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) + { +- const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); ++ const struct x86_cpu_id *m = x86_match_cpu(table); + + return m && !!(m->driver_data & which); + } +@@ -978,29 +999,32 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not 
mitigated */ +- if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) ++ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && ++ !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); + +- if (cpu_matches(NO_SPECULATION)) ++ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + +- if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && ++ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && ++ !(ia32_cap & ARCH_CAP_SSB_NO) && + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { ++ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && ++ !(ia32_cap & ARCH_CAP_MDS_NO)) { + setup_force_cpu_bug(X86_BUG_MDS); +- if (cpu_matches(MSBDS_ONLY)) ++ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) + setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); + } + +- if (!cpu_matches(NO_SWAPGS)) ++ if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) + setup_force_cpu_bug(X86_BUG_SWAPGS); + + /* +@@ -1018,7 +1042,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) + setup_force_cpu_bug(X86_BUG_TAA); + +- if (cpu_matches(NO_MELTDOWN)) ++ /* ++ * SRBDS affects CPUs which support RDRAND or RDSEED and are listed ++ * in the vulnerability blacklist. ++ */ ++ if ((cpu_has(c, X86_FEATURE_RDRAND) || ++ cpu_has(c, X86_FEATURE_RDSEED)) && ++ cpu_matches(cpu_vuln_blacklist, SRBDS)) ++ setup_force_cpu_bug(X86_BUG_SRBDS); ++ ++ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + + /* Rogue Data Cache Load? No! 
*/ +@@ -1027,7 +1060,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + +- if (cpu_matches(NO_L1TF)) ++ if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) + return; + + setup_force_cpu_bug(X86_BUG_L1TF); +@@ -1450,6 +1483,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) + mtrr_ap_init(); + validate_apic_and_package_id(c); + x86_spec_ctrl_setup_ap(); ++ update_srbds_msr(); + } + + struct msr_range { +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h +index 4350f50b5deb..fdeeab6b158c 100644 +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -66,6 +66,7 @@ extern int detect_extended_topology_early(struct cpuinfo_x86 *c); + extern int detect_ht_early(struct cpuinfo_x86 *c); + + extern void x86_spec_ctrl_setup_ap(void); ++extern void update_srbds_msr(void); + + extern u64 x86_read_arch_cap_msr(void); + +diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c +index e42117d5f4d7..f46ffb3b295f 100644 +--- a/arch/x86/kernel/cpu/match.c ++++ b/arch/x86/kernel/cpu/match.c +@@ -33,13 +33,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) + const struct x86_cpu_id *m; + struct cpuinfo_x86 *c = &boot_cpu_data; + +- for (m = match; m->vendor | m->family | m->model | m->feature; m++) { ++ for (m = match; ++ m->vendor | m->family | m->model | m->steppings | m->feature; ++ m++) { + if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) + continue; + if (m->family != X86_FAMILY_ANY && c->x86 != m->family) + continue; + if (m->model != X86_MODEL_ANY && c->x86_model != m->model) + continue; ++ if (m->steppings != X86_STEPPING_ANY && ++ !(BIT(c->x86_stepping) & m->steppings)) ++ continue; + if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) + continue; + return m; +diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c +index bef36622e408..abd4fa587ca4 100644 +--- a/arch/x86/mm/mmio-mod.c ++++ b/arch/x86/mm/mmio-mod.c +@@ -385,7 +385,7 @@ static void enter_uniprocessor(void) + int cpu; + int err; + +- if (downed_cpus == NULL && ++ if (!cpumask_available(downed_cpus) && + !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { + pr_notice("Failed to allocate mask\n"); + goto out; +@@ -415,7 +415,7 @@ static void leave_uniprocessor(void) + int cpu; + int err; + +- if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) ++ if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0) + return; + pr_notice("Re-enabling CPUs...\n"); + for_each_cpu(cpu, downed_cpus) { +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 677c5f36674b..100850398dd3 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -550,6 +550,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev, + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_srbds(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +@@ -558,6 +564,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); + static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); + static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); + static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); ++static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); + + static struct attribute 
*cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -568,6 +575,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_mds.attr, + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, ++ &dev_attr_srbds.attr, + NULL + }; + +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +index 95052373a828..681ac9bc68b3 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +@@ -381,6 +381,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ .ident = "Schneider SCL142ALM", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { } /* Terminate list */ + }; + +diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c +index 360b6e98137a..5a3a532937ba 100644 +--- a/drivers/iio/light/vcnl4000.c ++++ b/drivers/iio/light/vcnl4000.c +@@ -61,7 +61,6 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, + u8 rdy_mask, u8 data_reg, int *val) + { + int tries = 20; +- __be16 buf; + int ret; + + mutex_lock(&data->lock); +@@ -88,13 +87,12 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, + goto fail; + } + +- ret = i2c_smbus_read_i2c_block_data(data->client, +- data_reg, sizeof(buf), (u8 *) &buf); ++ ret = i2c_smbus_read_word_swapped(data->client, data_reg); + if (ret < 0) + goto fail; + + mutex_unlock(&data->lock); +- *val = be16_to_cpu(buf); ++ *val = ret; + + return 0; + +diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c +index d0435c7631ff..9c938f9892b2 100644 +--- a/drivers/net/can/slcan.c ++++ b/drivers/net/can/slcan.c +@@ -618,10 +618,9 @@ err_free_chan: + sl->tty = NULL; + tty->disc_data = NULL; + clear_bit(SLF_INUSE, &sl->flags); +- slc_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); +- free_netdev(sl->dev); ++ slc_free_netdev(sl->dev); + return err; + + err_exit: +diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c +index a65d7a60f116..ffa7e7e6d18d 100644 +--- a/drivers/net/ethernet/apple/bmac.c ++++ b/drivers/net/ethernet/apple/bmac.c +@@ -1187,7 +1187,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea) + int i; + unsigned short data; + +- for (i = 0; i < 6; i++) ++ for (i = 0; i < 3; i++) + { + reset_and_select_srom(dev); + data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index 714593023bbc..af922bac19ae 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -45,6 +45,7 @@ + #include + #include + #include ++#include + + #include "ucc_geth.h" + +@@ -1551,11 +1552,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) + + static void ugeth_quiesce(struct ucc_geth_private *ugeth) + { +- /* Prevent any further xmits, plus detach the device. */ +- netif_device_detach(ugeth->ndev); +- +- /* Wait for any current xmits to finish. */ +- netif_tx_disable(ugeth->ndev); ++ /* Prevent any further xmits */ ++ netif_tx_stop_all_queues(ugeth->ndev); + + /* Disable the interrupt to avoid NAPI rescheduling. 
*/ + disable_irq(ugeth->ug_info->uf_info.irq); +@@ -1568,7 +1566,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) + { + napi_enable(&ugeth->napi); + enable_irq(ugeth->ug_info->uf_info.irq); +- netif_device_attach(ugeth->ndev); ++ ++ /* allow to xmit again */ ++ netif_tx_wake_all_queues(ugeth->ndev); ++ __netdev_watchdog_up(ugeth->ndev); + } + + /* Called every time the controller might need to be made +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c +index 4143659615e1..264136dba674 100644 +--- a/drivers/net/ethernet/smsc/smsc911x.c ++++ b/drivers/net/ethernet/smsc/smsc911x.c +@@ -2506,20 +2506,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev) + + retval = smsc911x_init(dev); + if (retval < 0) +- goto out_disable_resources; ++ goto out_init_fail; + + netif_carrier_off(dev); + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); +- goto out_disable_resources; ++ goto out_init_fail; + } + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); +- goto out_disable_resources; ++ goto out_init_fail; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: \"%s\"", dev->name); +@@ -2560,9 +2560,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) + + return 0; + +-out_disable_resources: ++out_init_fail: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); ++out_disable_resources: + (void)smsc911x_disable_resources(pdev); + out_enable_resources_fail: + smsc911x_free_resources(pdev); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +index 11a4a81b0397..bcc5d1e16ce2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +@@ -330,6 +330,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) + /* Enable PTP clock */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); ++ switch (gmac->phy_mode) { ++ case PHY_INTERFACE_MODE_RGMII: ++ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | ++ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); ++ break; ++ case PHY_INTERFACE_MODE_SGMII: ++ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | ++ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); ++ break; ++ default: ++ /* We don't get here; the switch above will have errored out */ ++ unreachable(); ++ } + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c +index fa8f7c40a384..804c52c35f07 100644 +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -494,6 +494,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, + if (!skb) + goto out; + ++ if (skb->pkt_type != PACKET_HOST) ++ goto abort; ++ + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) + goto abort; + +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c +index cc841126147e..f870396e05e1 100644 +--- a/drivers/net/slip/slip.c ++++ b/drivers/net/slip/slip.c +@@ -867,7 +867,10 @@ err_free_chan: + sl->tty = NULL; + tty->disc_data = NULL; + clear_bit(SLF_INUSE, &sl->flags); ++ /* do not call free_netdev before rtnl_unlock */ ++ rtnl_unlock(); + sl_free_netdev(sl->dev); ++ return err; + + err_exit: + rtnl_unlock(); +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c 
+index 5755eec00d7f..9a873616dd27 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -921,6 +921,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ +diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c +index a8d470010f5e..ea609dc7f081 100644 +--- a/drivers/net/wireless/cisco/airo.c ++++ b/drivers/net/wireless/cisco/airo.c +@@ -1928,6 +1928,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!",__func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + npacks = skb_queue_len (&ai->txq); + + if (npacks >= MAXTXQ - 1) { +@@ -2130,6 +2134,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!", __func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + + /* Find a vacant FID */ + for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); +@@ -2204,6 +2212,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!", __func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + + /* Find a vacant FID */ + for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); +diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c +index 4a197a32d78c..979fcef1d848 100644 +--- a/drivers/net/wireless/intersil/p54/p54usb.c ++++ b/drivers/net/wireless/intersil/p54/p54usb.c +@@ -64,6 +64,7 @@ static struct usb_device_id p54u_table[] = { + {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ + {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ + {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ ++ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */ + {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ + {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ + {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ +diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c +index 798a32bbac5d..e023a679bdea 100644 +--- a/drivers/nfc/st21nfca/dep.c ++++ b/drivers/nfc/st21nfca/dep.c +@@ -184,8 +184,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, + memcpy(atr_res->gbi, atr_req->gbi, gb_len); + r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, + gb_len); +- if (r < 0) ++ if (r < 0) { ++ kfree_skb(skb); + return r; ++ } + } + + info->dep_info.curr_nfc_dep_pni = 0; +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c +index b5305f08b184..05b1c4f36b7b 100644 +--- a/drivers/nvmem/qfprom.c ++++ b/drivers/nvmem/qfprom.c +@@ -30,19 +30,6 @@ static int qfprom_reg_read(void *context, + return 0; + } + +-static int qfprom_reg_write(void *context, +- unsigned int reg, void *_val, size_t bytes) +-{ +- void __iomem *base = context; +- u32 *val = _val; +- int i = 0, words = bytes / 4; +- +- 
while (words--) +- writel(*val++, base + reg + (i++ * 4)); +- +- return 0; +-} +- + static int qfprom_remove(struct platform_device *pdev) + { + struct nvmem_device *nvmem = platform_get_drvdata(pdev); +@@ -56,7 +43,6 @@ static struct nvmem_config econfig = { + .stride = 4, + .word_size = 1, + .reg_read = qfprom_reg_read, +- .reg_write = qfprom_reg_write, + }; + + static int qfprom_probe(struct platform_device *pdev) +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 9d555b63d2e2..d596b76eea64 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -394,8 +394,8 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); + + /** + * scsi_dev_info_list_find - find a matching dev_info list entry. +- * @vendor: vendor string +- * @model: model (product) string ++ * @vendor: full vendor string ++ * @model: full model (product) string + * @key: specify list to use + * + * Description: +@@ -410,7 +410,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + struct scsi_dev_info_list *devinfo; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); +- size_t vmax, mmax; ++ size_t vmax, mmax, mlen; + const char *vskip, *mskip; + + if (IS_ERR(devinfo_table)) +@@ -449,15 +449,18 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + dev_info_list) { + if (devinfo->compatible) { + /* +- * Behave like the older version of get_device_flags. ++ * vendor strings must be an exact match + */ +- if (memcmp(devinfo->vendor, vskip, vmax) || +- (vmax < sizeof(devinfo->vendor) && +- devinfo->vendor[vmax])) ++ if (vmax != strlen(devinfo->vendor) || ++ memcmp(devinfo->vendor, vskip, vmax)) + continue; +- if (memcmp(devinfo->model, mskip, mmax) || +- (mmax < sizeof(devinfo->model) && +- devinfo->model[mmax])) ++ ++ /* ++ * @model specifies the full string, and ++ * must be larger or equal to devinfo->model ++ */ ++ mlen = strlen(devinfo->model); ++ if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) + continue; + return devinfo; + } else { +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 50d15748084a..af4b0a2021d6 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -1512,6 +1512,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) + + err = ufshcd_map_sg(hba, lrbp); + if (err) { ++ ufshcd_release(hba); + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); + goto out; +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index 16f0def9df82..babf0a337e96 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -305,6 +305,9 @@ static int dw_spi_transfer_one(struct spi_master *master, + dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); + ++ /* Ensure dw->rx and dw->rx_end are visible */ ++ smp_mb(); ++ + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed */ +diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h +index b8af9656e6da..f97275b90177 100644 +--- a/drivers/staging/rtl8712/wifi.h ++++ b/drivers/staging/rtl8712/wifi.h +@@ -471,7 +471,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe) + /* block-ack parameters */ + #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 + #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 ++#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 + #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 + #define 
IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +@@ -565,13 +565,6 @@ struct ieee80211_ht_addt_info { + #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 + #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 + +-/* block-ack parameters */ +-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 +- + /* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index f8964247c4c3..985f49a65906 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -358,15 +358,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + * tty fields and return the kref reference. + */ + if (rc) { +- tty_port_tty_set(&hp->port, NULL); +- tty->driver_data = NULL; +- tty_port_put(&hp->port); + printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); +- } else ++ } else { + /* We are ready... raise DTR/RTS */ + if (C_BAUD(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 1); ++ tty_port_set_initialized(&hp->port, true); ++ } + + /* Force wakeup of the polling thread */ + hvc_kick(); +@@ -376,22 +375,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + + static void hvc_close(struct tty_struct *tty, struct file * filp) + { +- struct hvc_struct *hp; ++ struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + + if (tty_hung_up_p(filp)) + return; + +- /* +- * No driver_data means that this close was issued after a failed +- * hvc_open by the tty layer's release_dev() function and we can just +- * exit cleanly because the kref reference wasn't made. +- */ +- if (!tty->driver_data) +- return; +- +- hp = tty->driver_data; +- + spin_lock_irqsave(&hp->port.lock, flags); + + if (--hp->port.count == 0) { +@@ -399,6 +388,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + /* We are done with the tty pointer now. */ + tty_port_tty_set(&hp->port, NULL); + ++ if (!tty_port_initialized(&hp->port)) ++ return; ++ + if (C_HUPCL(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 0); +@@ -415,6 +407,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + * waking periodically to check chars_in_buffer(). + */ + tty_wait_until_sent(tty, HVC_CLOSE_WAIT); ++ tty_port_set_initialized(&hp->port, false); + } else { + if (hp->port.count < 0) + printk(KERN_ERR "hvc_close %X: oops, count is %d\n", +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index b4e7a7317713..d9eba7938917 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -125,7 +125,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ + static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ + static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. 
*/ + static bool dead_key_next; +-static int npadch = -1; /* -1 or number assembled on pad */ ++ ++/* Handles a number being assembled on the number pad */ ++static bool npadch_active; ++static unsigned int npadch_value; ++ + static unsigned int diacr; + static char rep; /* flag telling character repeat */ + +@@ -815,12 +819,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) + shift_state &= ~(1 << value); + + /* kludge */ +- if (up_flag && shift_state != old_state && npadch != -1) { ++ if (up_flag && shift_state != old_state && npadch_active) { + if (kbd->kbdmode == VC_UNICODE) +- to_utf8(vc, npadch); ++ to_utf8(vc, npadch_value); + else +- put_queue(vc, npadch & 0xff); +- npadch = -1; ++ put_queue(vc, npadch_value & 0xff); ++ npadch_active = false; + } + } + +@@ -838,7 +842,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) + + static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) + { +- int base; ++ unsigned int base; + + if (up_flag) + return; +@@ -852,10 +856,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) + base = 16; + } + +- if (npadch == -1) +- npadch = value; +- else +- npadch = npadch * base + value; ++ if (!npadch_active) { ++ npadch_value = 0; ++ npadch_active = true; ++ } ++ ++ npadch_value = npadch_value * base + value; + } + + static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index f4bd08cfac11..a631975e050d 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -1069,13 +1069,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) + agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); + if (!agdev->out_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); +- return ret; ++ return -ENODEV; + } + + agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); + if (!agdev->in_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); +- return ret; ++ return -ENODEV; + } + + uac2->p_prm.uac2 = uac2; +diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c +index 534a3f6fa89c..474bb13b7dbb 100644 +--- a/drivers/usb/musb/musb_debugfs.c ++++ b/drivers/usb/musb/musb_debugfs.c +@@ -200,6 +200,11 @@ static ssize_t musb_test_mode_write(struct file *file, + u8 test; + char buf[18]; + ++ memset(buf, 0x00, sizeof(buf)); ++ ++ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) ++ return -EFAULT; ++ + pm_runtime_get_sync(musb->controller); + test = musb_readb(musb->mregs, MUSB_TESTMODE); + if (test) { +@@ -208,11 +213,6 @@ static ssize_t musb_test_mode_write(struct file *file, + goto ret; + } + +- memset(buf, 0x00, sizeof(buf)); +- +- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) +- return -EFAULT; +- + if (strstarts(buf, "force host")) + test = MUSB_TEST_FORCE_HOST; + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 737b6652a0b5..326e7109b8f8 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1146,6 +1146,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */ ++ .driver_info = NCTRL(0) | RSVD(3) }, ++ { 
USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ ++ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 06916ddc3159..c59e6d4a8a61 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ ++ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ +diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c +index 93c696e2131f..0fbb34fcbddf 100644 +--- a/drivers/usb/serial/usb_wwan.c ++++ b/drivers/usb/serial/usb_wwan.c +@@ -305,6 +305,10 @@ static void usb_wwan_indat_callback(struct urb *urb) + if (status) { + dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", + __func__, status, endpoint); ++ ++ /* don't resubmit on fatal errors */ ++ if (status == -ESHUTDOWN || status == -ENOENT) ++ return; + } else { + if (urb->actual_length) { + tty_insert_flip_string(&port->port, data, +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index 1abfe37314a0..df841a72b804 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -572,6 +572,10 @@ struct mips_cdmm_device_id { + /* + * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. + * Although gcc seems to ignore this error, clang fails without this define. ++ * ++ * Note: The ordering of the struct is different from upstream because the ++ * static initializers in kernels < 5.7 still use C89 style while upstream ++ * has been converted to proper C99 initializers. + */ + #define x86cpu_device_id x86_cpu_id + struct x86_cpu_id { +@@ -580,6 +584,7 @@ struct x86_cpu_id { + __u16 model; + __u16 feature; /* bit index */ + kernel_ulong_t driver_data; ++ __u16 steppings; + }; + + #define X86_FEATURE_MATCH(x) \ +@@ -588,6 +593,7 @@ struct x86_cpu_id { + #define X86_VENDOR_ANY 0xffff + #define X86_FAMILY_ANY 0 + #define X86_MODEL_ANY 0 ++#define X86_STEPPING_ANY 0 + #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ + + /* +diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h +index 7e385b83b9d8..fe4b6b69d79a 100644 +--- a/include/uapi/linux/mmc/ioctl.h ++++ b/include/uapi/linux/mmc/ioctl.h +@@ -2,6 +2,7 @@ + #define LINUX_MMC_IOCTL_H + + #include ++#include + + struct mmc_ioc_cmd { + /* Implies direction of data. 
true = write, false = read */ +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index 8ddd29476c0d..1fcaa174ed32 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -604,10 +604,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, + if (ret) + goto out; + +- /* uprobe_write_opcode() assumes we don't cross page boundary */ +- BUG_ON((uprobe->offset & ~PAGE_MASK) + +- UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); +- + smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ + set_bit(UPROBE_COPY_INSN, &uprobe->flags); + +@@ -886,6 +882,13 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * + if (offset > i_size_read(inode)) + return -EINVAL; + ++ /* ++ * This ensures that copy_from_page() and copy_to_page() ++ * can't cross page boundary. ++ */ ++ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) ++ return -EINVAL; ++ + retry: + uprobe = alloc_uprobe(inode, offset); + if (!uprobe) +@@ -1696,6 +1699,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) + uprobe_opcode_t opcode; + int result; + ++ if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) ++ return -EINVAL; ++ + pagefault_disable(); + result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); + pagefault_enable(); +diff --git a/kernel/relay.c b/kernel/relay.c +index 91e8fbf8aff3..5034cb3a339f 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -578,6 +578,11 @@ struct rchan *relay_open(const char *base_filename, + return NULL; + + chan->buf = alloc_percpu(struct rchan_buf *); ++ if (!chan->buf) { ++ kfree(chan); ++ return NULL; ++ } ++ + chan->version = RELAYFS_CHANNEL_VERSION; + chan->n_subbufs = n_subbufs; + chan->subbuf_size = subbuf_size; +diff --git a/mm/mremap.c b/mm/mremap.c +index 9e6035969d7b..b5d8d25173c6 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -212,7 +212,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, + new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); + if (!new_pmd) + break; +- if (pmd_trans_huge(*old_pmd)) { ++ if (pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { + if (extent == HPAGE_PMD_SIZE) { + bool moved; + /* See comment in move_ptes() */ +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index af3363f4543f..6f3c52943186 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -262,6 +262,7 @@ static struct in_device *inetdev_init(struct net_device *dev) + err = devinet_sysctl_register(in_dev); + if (err) { + in_dev->dead = 1; ++ neigh_parms_release(&arp_tbl, in_dev->arp_parms); + in_dev_put(in_dev); + in_dev = NULL; + goto out; +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 6a924be66e37..da158a3acac4 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -426,8 +426,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) + + sg_init_table(sg, nfrags); + ret = skb_to_sgvec(skb, sg, 0, skb->len); +- if (unlikely(ret < 0)) ++ if (unlikely(ret < 0)) { ++ kfree(tmp); + goto out; ++ } + + aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); + aead_request_set_ad(req, assoclen); +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 36c7f616294a..fdc1de1cb4fa 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -1568,6 +1568,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 + tunnel_id, fd); + goto err; + } ++ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) ++ goto err; + switch (encap) { + case L2TP_ENCAPTYPE_UDP: + if (sk->sk_protocol != IPPROTO_UDP) { +diff --git 
a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c +index 4a88c4eb2301..3817c3554641 100644 +--- a/net/l2tp/l2tp_ip.c ++++ b/net/l2tp/l2tp_ip.c +@@ -24,7 +24,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -208,15 +207,31 @@ discard: + return 0; + } + +-static int l2tp_ip_open(struct sock *sk) ++static int l2tp_ip_hash(struct sock *sk) + { +- /* Prevent autobind. We don't have ports. */ +- inet_sk(sk)->inet_num = IPPROTO_L2TP; ++ if (sk_unhashed(sk)) { ++ write_lock_bh(&l2tp_ip_lock); ++ sk_add_node(sk, &l2tp_ip_table); ++ write_unlock_bh(&l2tp_ip_lock); ++ } ++ return 0; ++} + ++static void l2tp_ip_unhash(struct sock *sk) ++{ ++ if (sk_unhashed(sk)) ++ return; + write_lock_bh(&l2tp_ip_lock); +- sk_add_node(sk, &l2tp_ip_table); ++ sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip_lock); ++} ++ ++static int l2tp_ip_open(struct sock *sk) ++{ ++ /* Prevent autobind. We don't have ports. */ ++ inet_sk(sk)->inet_num = IPPROTO_L2TP; + ++ l2tp_ip_hash(sk); + return 0; + } + +@@ -598,8 +613,8 @@ static struct proto l2tp_ip_prot = { + .sendmsg = l2tp_ip_sendmsg, + .recvmsg = l2tp_ip_recvmsg, + .backlog_rcv = l2tp_ip_backlog_recv, +- .hash = inet_hash, +- .unhash = inet_unhash, ++ .hash = l2tp_ip_hash, ++ .unhash = l2tp_ip_unhash, + .obj_size = sizeof(struct l2tp_ip_sock), + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ip_setsockopt, +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c +index 28274f397c55..76ef758db112 100644 +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -24,8 +24,6 @@ + #include + #include + #include +-#include +-#include + #include + #include + #include +@@ -221,15 +219,31 @@ discard: + return 0; + } + +-static int l2tp_ip6_open(struct sock *sk) ++static int l2tp_ip6_hash(struct sock *sk) + { +- /* Prevent autobind. We don't have ports. */ +- inet_sk(sk)->inet_num = IPPROTO_L2TP; ++ if (sk_unhashed(sk)) { ++ write_lock_bh(&l2tp_ip6_lock); ++ sk_add_node(sk, &l2tp_ip6_table); ++ write_unlock_bh(&l2tp_ip6_lock); ++ } ++ return 0; ++} + ++static void l2tp_ip6_unhash(struct sock *sk) ++{ ++ if (sk_unhashed(sk)) ++ return; + write_lock_bh(&l2tp_ip6_lock); +- sk_add_node(sk, &l2tp_ip6_table); ++ sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); ++} ++ ++static int l2tp_ip6_open(struct sock *sk) ++{ ++ /* Prevent autobind. We don't have ports. */ ++ inet_sk(sk)->inet_num = IPPROTO_L2TP; + ++ l2tp_ip6_hash(sk); + return 0; + } + +@@ -732,8 +746,8 @@ static struct proto l2tp_ip6_prot = { + .sendmsg = l2tp_ip6_sendmsg, + .recvmsg = l2tp_ip6_recvmsg, + .backlog_rcv = l2tp_ip6_backlog_recv, +- .hash = inet6_hash, +- .unhash = inet_unhash, ++ .hash = l2tp_ip6_hash, ++ .unhash = l2tp_ip6_unhash, + .obj_size = sizeof(struct l2tp_ip6_sock), + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index 18f377306884..d6473b8d9a81 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -1296,7 +1296,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) + /* Wait for children sockets to appear; these are the new sockets + * created upon connection establishment. 
+ */ +- timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); ++ timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); + prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); + + while ((connected = vsock_dequeue_accept(listener)) == NULL && diff --git a/patch/kernel/odroidc4-legacy/patch-4.9.227-228.patch b/patch/kernel/odroidc4-legacy/patch-4.9.227-228.patch new file mode 100644 index 000000000..9d94f310d --- /dev/null +++ b/patch/kernel/odroidc4-legacy/patch-4.9.227-228.patch @@ -0,0 +1,4265 @@ +diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +index b6a7e7397b8b..b944fe067188 100644 +--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt ++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +@@ -16,6 +16,9 @@ Required properties: + Documentation/devicetree/bindings/graph.txt. This port should be connected + to the input port of an attached HDMI or LVDS encoder chip. + ++Optional properties: ++- pinctrl-names: Contain "default" and "sleep". ++ + Example: + + dpi0: dpi@1401d000 { +@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { + <&mmsys CLK_MM_DPI_ENGINE>, + <&apmixedsys CLK_APMIXED_TVDPLL>; + clock-names = "pixel", "engine", "pll"; ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&dpi_pin_func>; ++ pinctrl-1 = <&dpi_pin_idle>; + + port { + dpi0_out: endpoint { +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt +index d1908e50b506..b8f5bf2a890a 100644 +--- a/Documentation/virtual/kvm/api.txt ++++ b/Documentation/virtual/kvm/api.txt +@@ -3534,9 +3534,11 @@ EOI was received. + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/Makefile b/Makefile +index 6c3c6e193621..af23d7b67442 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 227 ++SUBLEVEL = 228 + EXTRAVERSION = + NAME = Roaring Lionus + +@@ -313,12 +313,8 @@ KBUILD_MODULES := + KBUILD_BUILTIN := 1 + + # If we have only "make modules", don't compile built-in objects. +-# When we're building modules with modversions, we need to consider +-# the built-in objects during the descend as well, in order to +-# make sure the checksums are up to date before we record them. +- + ifeq ($(MAKECMDGOALS),modules) +- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) ++ KBUILD_BUILTIN := + endif + + # If we have "make modules", compile modules +@@ -1237,6 +1233,13 @@ ifdef CONFIG_MODULES + + all: modules + ++# When we're building modules with modversions, we need to consider ++# the built-in objects during the descend as well, in order to ++# make sure the checksums are up to date before we record them. 
++ifdef CONFIG_MODVERSIONS ++ KBUILD_BUILTIN := 1 ++endif ++ + # Build modules + # + # A module can be listed more than once in obj-m resulting in +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index ae738a6319f6..364985c96a92 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -227,8 +227,8 @@ static struct undef_hook arm_break_hook = { + }; + + static struct undef_hook thumb_break_hook = { +- .instr_mask = 0xffff, +- .instr_val = 0xde01, ++ .instr_mask = 0xffffffff, ++ .instr_val = 0x0000de01, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = break_trap, +diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c +index e01cbca196b5..a67fcf7a5643 100644 +--- a/arch/arm/mach-tegra/tegra.c ++++ b/arch/arm/mach-tegra/tegra.c +@@ -137,8 +137,8 @@ static const char * const tegra_dt_board_compat[] = { + }; + + DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") +- .l2c_aux_val = 0x3c400001, +- .l2c_aux_mask = 0xc20fc3fe, ++ .l2c_aux_val = 0x3c400000, ++ .l2c_aux_mask = 0xc20fc3ff, + .smp = smp_ops(tegra_smp_ops), + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index f8bb65032b79..796e8f675a93 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -4,6 +4,7 @@ + * VMA_VM_FLAGS + * VM_EXEC + */ ++#include + #include + #include + +@@ -34,7 +35,7 @@ + * act_mm - get current->active_mm + */ + .macro act_mm, rd +- bic \rd, sp, #8128 ++ bic \rd, sp, #(THREAD_SIZE - 1) & ~63 + bic \rd, \rd, #63 + ldr \rd, [\rd, #TI_TASK] + ldr \rd, [\rd, #TSK_ACTIVE_MM] +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index 4cdfbd01b2de..367ebb48170b 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -290,8 +290,10 @@ struct kvm_vcpu_arch { + * CP14 and CP15 live in the same array, as they are backed by the + * same system registers. + */ +-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) +-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) ++#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ++ ++#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) ++#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) + + #ifdef CONFIG_CPU_BIG_ENDIAN + #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) +diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h +index 53c632c85b03..dff6db19ae4d 100644 +--- a/arch/m68k/include/asm/mac_via.h ++++ b/arch/m68k/include/asm/mac_via.h +@@ -256,6 +256,7 @@ extern int rbv_present,via_alt_mapping; + + struct irq_desc; + ++extern void via_l2_flush(int writeback); + extern void via_register_interrupts(void); + extern void via_irq_enable(int); + extern void via_irq_disable(int); +diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c +index e46895316eb0..dcf18e1ca0bb 100644 +--- a/arch/m68k/mac/config.c ++++ b/arch/m68k/mac/config.c +@@ -61,7 +61,6 @@ extern void iop_preinit(void); + extern void iop_init(void); + extern void via_init(void); + extern void via_init_clock(irq_handler_t func); +-extern void via_flush_cache(void); + extern void oss_init(void); + extern void psc_init(void); + extern void baboon_init(void); +@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) + return unknown; + } + +-/* +- * Flip into 24bit mode for an instant - flushes the L2 cache card. We +- * have to disable interrupts for this. 
Our IRQ handlers will crap +- * themselves if they take an IRQ in 24bit mode! +- */ +- +-static void mac_cache_card_flush(int writeback) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- via_flush_cache(); +- local_irq_restore(flags); +-} +- + void __init config_mac(void) + { + if (!MACH_IS_MAC) +@@ -179,9 +163,8 @@ void __init config_mac(void) + * not. + */ + +- if (macintosh_config->ident == MAC_MODEL_IICI +- || macintosh_config->ident == MAC_MODEL_IIFX) +- mach_l2_flush = mac_cache_card_flush; ++ if (macintosh_config->ident == MAC_MODEL_IICI) ++ mach_l2_flush = via_l2_flush; + } + + +diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c +index a435aced6e43..35382c1b563f 100644 +--- a/arch/m68k/mac/via.c ++++ b/arch/m68k/mac/via.c +@@ -299,10 +299,14 @@ void via_debug_dump(void) + * the system into 24-bit mode for an instant. + */ + +-void via_flush_cache(void) ++void via_l2_flush(int writeback) + { ++ unsigned long flags; ++ ++ local_irq_save(flags); + via2[gBufB] &= ~VIA2B_vMode32; + via2[gBufB] |= VIA2B_vMode32; ++ local_irq_restore(flags); + } + + /* +diff --git a/arch/mips/Makefile b/arch/mips/Makefile +index 1a6bac7b076f..25f3bfef9b39 100644 +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -256,12 +256,23 @@ ifdef CONFIG_64BIT + endif + endif + ++# When linking a 32-bit executable the LLVM linker cannot cope with a ++# 32-bit load address that has been sign-extended to 64 bits. Simply ++# remove the upper 32 bits then, as it is safe to do so with other ++# linkers. ++ifdef CONFIG_64BIT ++ load-ld = $(load-y) ++else ++ load-ld = $(subst 0xffffffff,0x,$(load-y)) ++endif ++ + KBUILD_AFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(cflags-y) +-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) ++KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) + KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) + + bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ ++ LINKER_LOAD_ADDRESS=$(load-ld) \ + VMLINUX_ENTRY_ADDRESS=$(entry-y) \ + PLATFORM="$(platform-y)" + ifdef CONFIG_32BIT +diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile +index 2f77e250b91d..0fa91c981658 100644 +--- a/arch/mips/boot/compressed/Makefile ++++ b/arch/mips/boot/compressed/Makefile +@@ -87,7 +87,7 @@ ifneq ($(zload-y),) + VMLINUZ_LOAD_ADDRESS := $(zload-y) + else + VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ +- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) ++ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) + endif + + vmlinuzobjs-y += $(obj)/piggy.o +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h +index bebec370324f..22573b4f25b6 100644 +--- a/arch/mips/include/asm/kvm_host.h ++++ b/arch/mips/include/asm/kvm_host.h +@@ -243,8 +243,12 @@ enum emulation_result { + #define MIPS3_PG_SHIFT 6 + #define MIPS3_PG_FRAME 0x3fffffc0 + ++#if defined(CONFIG_64BIT) ++#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) ++#else + #define VPN2_MASK 0xffffe000 +-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID ++#endif ++#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) + #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) + #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) + #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S +index ae810da4d499..59ed811eb32a 100644 +--- a/arch/mips/kernel/genex.S ++++ b/arch/mips/kernel/genex.S +@@ -429,20 +429,20 @@ NESTED(nmi_handler, PT_SIZE, sp) + .endm + + .macro __build_clear_fpe ++ CLI ++ 
TRACE_IRQS_OFF + .set push + /* gas fails to assemble cfc1 for some archs (octeon).*/ \ + .set mips1 + SET_HARDFLOAT + cfc1 a1, fcr31 + .set pop +- CLI +- TRACE_IRQS_OFF + .endm + + .macro __build_clear_msa_fpe +- _cfcmsa a1, MSA_CSR + CLI + TRACE_IRQS_OFF ++ _cfcmsa a1, MSA_CSR + .endm + + .macro __build_clear_ade +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c +index 60177a612cb1..df65516778a2 100644 +--- a/arch/mips/kernel/mips-cm.c ++++ b/arch/mips/kernel/mips-cm.c +@@ -123,9 +123,9 @@ static char *cm2_causes[32] = { + "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", + "0x08", "0x09", "0x0a", "0x0b", + "0x0c", "0x0d", "0x0e", "0x0f", +- "0x10", "0x11", "0x12", "0x13", +- "0x14", "0x15", "0x16", "INTVN_WR_ERR", +- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", ++ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", ++ "0x14", "0x15", "0x16", "0x17", ++ "0x18", "0x19", "0x1a", "0x1b", + "0x1c", "0x1d", "0x1e", "0x1f" + }; + +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c +index 7cc1d29334ee..2c3b89a65317 100644 +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -847,7 +847,17 @@ static void __init arch_mem_init(char **cmdline_p) + BOOTMEM_DEFAULT); + #endif + device_tree_init(); ++ ++ /* ++ * In order to reduce the possibility of kernel panic when failed to ++ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate ++ * low memory as small as possible before plat_swiotlb_setup(), so ++ * make sparse_init() using top-down allocation. ++ */ ++ memblock_set_bottom_up(false); + sparse_init(); ++ memblock_set_bottom_up(true); ++ + plat_swiotlb_setup(); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c +index a7f81261c781..b7f7e08e1ce4 100644 +--- a/arch/mips/kernel/time.c ++++ b/arch/mips/kernel/time.c +@@ -22,12 +22,82 @@ + #include + #include + #include ++#include ++#include + + #include + #include + #include + #include + ++#ifdef CONFIG_CPU_FREQ ++ ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); ++static unsigned long glb_lpj_ref; ++static unsigned long glb_lpj_ref_freq; ++ ++static int cpufreq_callback(struct notifier_block *nb, ++ unsigned long val, void *data) ++{ ++ struct cpufreq_freqs *freq = data; ++ struct cpumask *cpus = freq->policy->cpus; ++ unsigned long lpj; ++ int cpu; ++ ++ /* ++ * Skip lpj numbers adjustment if the CPU-freq transition is safe for ++ * the loops delay. (Is this possible?) ++ */ ++ if (freq->flags & CPUFREQ_CONST_LOOPS) ++ return NOTIFY_OK; ++ ++ /* Save the initial values of the lpjes for future scaling. */ ++ if (!glb_lpj_ref) { ++ glb_lpj_ref = boot_cpu_data.udelay_val; ++ glb_lpj_ref_freq = freq->old; ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(pcp_lpj_ref, cpu) = ++ cpu_data[cpu].udelay_val; ++ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; ++ } ++ } ++ ++ /* ++ * Adjust global lpj variable and per-CPU udelay_val number in ++ * accordance with the new CPU frequency. 
++ */ ++ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || ++ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { ++ loops_per_jiffy = cpufreq_scale(glb_lpj_ref, ++ glb_lpj_ref_freq, ++ freq->new); ++ ++ for_each_cpu(cpu, cpus) { ++ lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), ++ per_cpu(pcp_lpj_ref_freq, cpu), ++ freq->new); ++ cpu_data[cpu].udelay_val = (unsigned int)lpj; ++ } ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block cpufreq_notifier = { ++ .notifier_call = cpufreq_callback, ++}; ++ ++static int __init register_cpufreq_notifier(void) ++{ ++ return cpufreq_register_notifier(&cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++} ++core_initcall(register_cpufreq_notifier); ++ ++#endif /* CONFIG_CPU_FREQ */ ++ + /* + * forward reference + */ +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S +index 2d965d91fee4..612b2b301280 100644 +--- a/arch/mips/kernel/vmlinux.lds.S ++++ b/arch/mips/kernel/vmlinux.lds.S +@@ -49,7 +49,7 @@ SECTIONS + /* . = 0xa800000000300000; */ + . = 0xffffffff80300000; + #endif +- . = VMLINUX_LOAD_ADDRESS; ++ . = LINKER_LOAD_ADDRESS; + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { +diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index 06254467e4dd..f12b00a056cb 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -2044,8 +2044,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, + static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { +- int ret; + struct spu_context *ctx = file->private_data; ++ u32 stat, data; ++ int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -2054,11 +2055,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_mbox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.prob.pu_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the mbox */ ++ if (!(stat & 0x0000ff)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_mbox_info_fops = { +@@ -2085,6 +2091,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ u32 stat, data; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2094,11 +2101,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_ibox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.priv2.puint_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the ibox */ ++ if (!(stat & 0xff0000)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_ibox_info_fops = { +@@ -2107,6 +2119,11 @@ static const struct file_operations spufs_ibox_info_fops = { + .llseek = generic_file_llseek, + }; + ++static size_t spufs_wbox_info_cnt(struct spu_context *ctx) ++{ ++ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); ++} ++ + static ssize_t __spufs_wbox_info_read(struct 
spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { +@@ -2115,7 +2132,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + u32 wbox_stat; + + wbox_stat = ctx->csa.prob.mb_stat_R; +- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); ++ cnt = spufs_wbox_info_cnt(ctx); + for (i = 0; i < cnt; i++) { + data[i] = ctx->csa.spu_mailbox_data[i]; + } +@@ -2128,7 +2145,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; +- int ret; ++ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; ++ int ret, count; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -2137,11 +2155,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_wbox_info_read(ctx, buf, len, pos); ++ count = spufs_wbox_info_cnt(ctx); ++ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &data, ++ count * sizeof(u32)); + } + + static const struct file_operations spufs_wbox_info_fops = { +@@ -2150,27 +2170,33 @@ static const struct file_operations spufs_wbox_info_fops = { + .llseek = generic_file_llseek, + }; + +-static ssize_t __spufs_dma_info_read(struct spu_context *ctx, +- char __user *buf, size_t len, loff_t *pos) ++static void spufs_get_dma_info(struct spu_context *ctx, ++ struct spu_dma_info *info) + { +- struct spu_dma_info info; +- struct mfc_cq_sr *qp, *spuqp; + int i; + +- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; +- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; +- info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; +- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; +- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; ++ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; ++ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; ++ info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; ++ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; ++ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + for (i = 0; i < 16; i++) { +- qp = &info.dma_info_command_data[i]; +- spuqp = &ctx->csa.priv2.spuq[i]; ++ struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; ++ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; + + qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; + } ++} ++ ++static ssize_t __spufs_dma_info_read(struct spu_context *ctx, ++ char __user *buf, size_t len, loff_t *pos) ++{ ++ struct spu_dma_info info; ++ ++ spufs_get_dma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2180,6 +2206,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_dma_info info; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2189,11 +2216,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_dma_info_read(ctx, buf, len, pos); ++ spufs_get_dma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return 
simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_dma_info_fops = { +@@ -2202,13 +2230,31 @@ static const struct file_operations spufs_dma_info_fops = { + .llseek = no_llseek, + }; + ++static void spufs_get_proxydma_info(struct spu_context *ctx, ++ struct spu_proxydma_info *info) ++{ ++ int i; ++ ++ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; ++ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; ++ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; ++ ++ for (i = 0; i < 8; i++) { ++ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; ++ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; ++ ++ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; ++ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; ++ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; ++ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; ++ } ++} ++ + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { + struct spu_proxydma_info info; +- struct mfc_cq_sr *qp, *puqp; + int ret = sizeof info; +- int i; + + if (len < ret) + return -EINVAL; +@@ -2216,18 +2262,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; + +- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; +- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; +- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; +- for (i = 0; i < 8; i++) { +- qp = &info.proxydma_info_command_data[i]; +- puqp = &ctx->csa.priv2.puq[i]; +- +- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; +- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; +- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; +- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; +- } ++ spufs_get_proxydma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2237,17 +2272,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_proxydma_info info; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_proxydma_info_read(ctx, buf, len, pos); ++ spufs_get_proxydma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_proxydma_info_fops = { +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c +index a331fdc11a2c..396dbdea0cfa 100644 +--- a/arch/sparc/kernel/ptrace_32.c ++++ b/arch/sparc/kernel/ptrace_32.c +@@ -45,82 +45,79 @@ enum sparc_regset { + REGSET_FP, + }; + ++static int regwindow32_get(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_from_user(uregs, (void __user *)reg_window, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int regwindow32_set(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_to_user((void __user *)reg_window, uregs, size)) ++ 
return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE | FOLL_WRITE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ + static int genregs32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + const struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- unsigned long *k = kbuf; +- unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- *k++ = regs->u_regs[pos++]; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (put_user(regs->u_regs[pos++], u++)) +- return -EFAULT; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, ®_window[pos++]) || +- put_user(reg, u++)) +- return -EFAULT; +- } +- } +- while (count > 0) { +- switch (pos) { +- case 32: /* PSR */ +- reg = regs->psr; +- break; +- case 33: /* PC */ +- reg = regs->pc; +- break; +- case 34: /* NPC */ +- reg = regs->npc; +- break; +- case 35: /* Y */ +- reg = regs->y; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- reg = 0; +- break; +- default: +- goto finish; +- } ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- *k++ = reg; +- else if (put_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- pos++; +- count--; ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); + +- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ uregs[0] = regs->psr; ++ uregs[1] = regs->pc; ++ uregs[2] = regs->npc; ++ uregs[3] = regs->y; ++ uregs[4] = 0; /* WIM */ ++ uregs[5] = 0; /* TBR */ ++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 32 * sizeof(u32), 38 * sizeof(u32)); + } + + static int genregs32_set(struct task_struct *target, +@@ -129,82 +126,53 @@ static int genregs32_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- const unsigned long *k = kbuf; +- const unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ u32 psr; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- regs->u_regs[pos++] = *k++; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (put_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (get_user(reg, u++)) +- return -EFAULT; +- regs->u_regs[pos++] = reg; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) 
{ +- if (get_user(reg, u++) || +- put_user(reg, ®_window[pos++])) +- return -EFAULT; +- } +- } +- while (count > 0) { +- unsigned long psr; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- reg = *k++; +- else if (get_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- +- switch (pos) { +- case 32: /* PSR */ +- psr = regs->psr; +- psr &= ~(PSR_ICC | PSR_SYSCALL); +- psr |= (reg & (PSR_ICC | PSR_SYSCALL)); +- regs->psr = psr; +- break; +- case 33: /* PC */ +- regs->pc = reg; +- break; +- case 34: /* NPC */ +- regs->npc = reg; +- break; +- case 35: /* Y */ +- regs->y = reg; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- break; +- default: +- goto finish; +- } +- +- pos++; +- count--; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret) ++ return ret; ++ if (regwindow32_set(target, regs, uregs)) ++ return -EFAULT; ++ if (!count) ++ return 0; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); +- ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ &psr, ++ 32 * sizeof(u32), 33 * sizeof(u32)); ++ if (ret) ++ return ret; ++ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | ++ (psr & (PSR_ICC | PSR_SYSCALL)); ++ if (!count) ++ return 0; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->pc, ++ 33 * sizeof(u32), 34 * sizeof(u32)); ++ if (ret || !count) ++ return ret; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->y, ++ 34 * sizeof(u32), 35 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ 35 * sizeof(u32), 38 * sizeof(u32)); + } + + static int fpregs32_get(struct task_struct *target, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 7037ca3b4328..0a89a0546077 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -533,19 +533,13 @@ static int genregs32_get(struct task_struct *target, + for (; count > 0 && pos < 32; count--) { + if (access_process_vm(target, + (unsigned long) +- ®_window[pos], ++ ®_window[pos++], + ®, sizeof(reg), + FOLL_FORCE) + != sizeof(reg)) + return -EFAULT; +- if (access_process_vm(target, +- (unsigned long) u, +- ®, sizeof(reg), +- FOLL_FORCE | FOLL_WRITE) +- != sizeof(reg)) ++ if (put_user(reg, u++)) + return -EFAULT; +- pos++; +- u++; + } + } + } +@@ -645,12 +639,7 @@ static int genregs32_set(struct task_struct *target, + } + } else { + for (; count > 0 && pos < 32; count--) { +- if (access_process_vm(target, +- (unsigned long) +- u, +- ®, sizeof(reg), +- FOLL_FORCE) +- != sizeof(reg)) ++ if (get_user(reg, u++)) + return -EFAULT; + if (access_process_vm(target, + (unsigned long) +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 7532f6f53677..93f41b4f05ce 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -48,16 +48,17 @@ + * Position Independent Executable (PIE) so that linker won't optimize + * R_386_GOT32X relocation to its fixed symbol address. Older + * linkers generate R_386_32 relocations against locally defined symbols, +- * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less ++ * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less + * optimal than R_386_RELATIVE. 
But the x86 kernel fails to properly handle + * R_386_32 relocations when relocating the kernel. To generate +- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as ++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as + * hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + ENTRY(startup_32) +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 3fac2d133e4e..d096bcfcb3f6 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -40,6 +40,7 @@ + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + .code32 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 2cd5d12a842c..8ceb7a8a249c 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -273,6 +273,7 @@ + #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ + #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ + #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ + #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ +@@ -312,7 +313,6 @@ + #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ + #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ + +- + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 8d56d701b5f7..4af16acc001a 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -223,6 +223,7 @@ enum spectre_v2_mitigation { + enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE, + SPECTRE_V2_USER_STRICT, ++ SPECTRE_V2_USER_STRICT_PREFERRED, + SPECTRE_V2_USER_PRCTL, + SPECTRE_V2_USER_SECCOMP, + }; +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 5ef0a2b34261..85c1cc0305f3 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -60,7 +60,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; + u64 __ro_after_init x86_amd_ls_cfg_base; + u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; + +-/* Control conditional STIPB in switch_to() */ ++/* Control conditional STIBP in switch_to() */ + DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); + /* Control conditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +@@ -580,7 +580,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = ++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = ++ SPECTRE_V2_USER_NONE; ++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = + SPECTRE_V2_USER_NONE; + + #ifdef 
RETPOLINE +@@ -632,10 +634,11 @@ enum spectre_v2_user_cmd { + }; + + static const char * const spectre_v2_user_strings[] = { +- [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", +- [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", +- [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", +- [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", ++ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", ++ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", ++ [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", ++ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", ++ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", + }; + + static const struct { +@@ -747,23 +750,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? + "always-on" : "conditional"); ++ ++ spectre_v2_user_ibpb = mode; + } + +- /* If enhanced IBRS is enabled no STIPB required */ +- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) ++ /* ++ * If enhanced IBRS is enabled or SMT impossible, STIBP is not ++ * required. ++ */ ++ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* +- * If SMT is not possible or STIBP is not available clear the STIPB +- * mode. ++ * At this point, an STIBP mode other than "off" has been set. ++ * If STIBP support is not being forced, check if STIBP always-on ++ * is preferred. + */ +- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) ++ if (mode != SPECTRE_V2_USER_STRICT && ++ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) ++ mode = SPECTRE_V2_USER_STRICT_PREFERRED; ++ ++ /* ++ * If STIBP is not available, clear the STIBP mode. ++ */ ++ if (!boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; ++ ++ spectre_v2_user_stibp = mode; ++ + set_mode: +- spectre_v2_user = mode; +- /* Only print the STIBP mode when SMT possible */ +- if (smt_possible) +- pr_info("%s\n", spectre_v2_user_strings[mode]); ++ pr_info("%s\n", spectre_v2_user_strings[mode]); + } + + static const char * const spectre_v2_strings[] = { +@@ -1003,10 +1019,11 @@ void arch_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: ++ case SPECTRE_V2_USER_STRICT_PREFERRED: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: +@@ -1235,13 +1252,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + { + switch (ctrl) { + case PR_SPEC_ENABLE: +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict +- * mode. ++ * mode. It can neither be enabled if it was force-disabled ++ * by a previous prctl call. 
++ + */ +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || ++ task_spec_ib_force_disable(task)) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); +@@ -1252,9 +1275,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + * Indirect branch speculation is always allowed when + * mitigation is force disabled. + */ +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return -EPERM; +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) +@@ -1285,7 +1311,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) + { + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); + } + #endif +@@ -1314,21 +1341,24 @@ static int ib_prctl_get(struct task_struct *task) + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + +- switch (spectre_v2_user) { +- case SPECTRE_V2_USER_NONE: ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_PRCTL: +- case SPECTRE_V2_USER_SECCOMP: ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) ++ return PR_SPEC_DISABLE; ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_STRICT: +- return PR_SPEC_DISABLE; +- default: ++ } else + return PR_SPEC_NOT_AFFECTED; +- } + } + + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +@@ -1569,11 +1599,13 @@ static char *stibp_state(void) + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; ++ case SPECTRE_V2_USER_STRICT_PREFERRED: ++ return ", STIBP: always-on"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 2e4eab22ca37..f58e4cc20c1c 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -337,28 +337,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + +- /* +- * If TIF_SSBD is different, select the proper mitigation +- * 
method. Note that if SSBD mitigation is disabled or permanentely +- * enabled this branch can't be taken because nothing can set +- * TIF_SSBD. +- */ +- if (tif_diff & _TIF_SSBD) { +- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ /* Handle change of TIF_SSBD depending on the mitigation method. */ ++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_ssb_virt_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_core_ssb_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) { +- msr |= ssbd_tif_to_spec_ctrl(tifn); +- updmsr = true; +- } ++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || ++ static_cpu_has(X86_FEATURE_AMD_SSBD)) { ++ updmsr |= !!(tif_diff & _TIF_SSBD); ++ msr |= ssbd_tif_to_spec_ctrl(tifn); + } + +- /* +- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, +- * otherwise avoid the MSR write. +- */ ++ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); +diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h +index 898e97cf6629..320ab978fb1f 100644 +--- a/arch/x86/kernel/process.h ++++ b/arch/x86/kernel/process.h +@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev, + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional +- * STIPB is disabled and the only different bit is ++ * STIBP is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. 
+ */ +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index c55b11fe8e9f..b427dc73ba27 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -198,6 +198,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), + }, + }, ++ { /* Handle problems with rebooting on Apple MacBook6,1 */ ++ .callback = set_pci_reboot, ++ .ident = "Apple MacBook6,1", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), ++ }, ++ }, + { /* Handle problems with rebooting on Apple MacBookPro5 */ + .callback = set_pci_reboot, + .ident = "Apple MacBookPro5", +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index 89d1190b9d94..5e5de7a7f38d 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -23,10 +23,6 @@ + #include + #include + +-#ifdef CONFIG_X86_64 +-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; +-#endif +- + unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index 55f04875293f..097268f85e4e 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -34,13 +34,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) + #ifdef CONFIG_X86_32 + OUTPUT_ARCH(i386) + ENTRY(phys_startup_32) +-jiffies = jiffies_64; + #else + OUTPUT_ARCH(i386:x86-64) + ENTRY(phys_startup_64) +-jiffies_64 = jiffies; + #endif + ++jiffies = jiffies_64; ++ + #if defined(CONFIG_X86_64) + /* + * On 64-bit, align RODATA to 2MB so we retain large page mappings for +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 1079228e4fef..29078eaf18c9 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -2734,7 +2734,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr + dst->iopm_base_pa = from->iopm_base_pa; + dst->msrpm_base_pa = from->msrpm_base_pa; + dst->tsc_offset = from->tsc_offset; +- dst->asid = from->asid; ++ /* asid not copied, it is handled manually for svm->vmcb. 
*/ + dst->tlb_ctl = from->tlb_ctl; + dst->int_ctl = from->int_ctl; + dst->int_vector = from->int_vector; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index b62886f10dc1..e7fe5974c81c 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8207,7 +8207,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + return true; + } + +- switch (exit_reason) { ++ switch ((u16)exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (is_nmi(intr_info)) + return false; +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index ce092a62fc5d..bc2455c2fcab 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -110,8 +110,6 @@ __ref void *alloc_low_pages(unsigned int num) + } else { + pfn = pgt_buf_end; + pgt_buf_end += num; +- printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", +- pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); + } + + for (i = 0; i < num; i++) { +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index 62950ef7f84e..68e86d7cc94d 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -571,6 +571,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); + + /* + * Device [1022:7914] +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 58be2eaa5aaa..e0ed7317e98c 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2331,6 +2331,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_freeze_queue(q); ++ /* ++ * Sync with blk_mq_queue_tag_busy_iter. ++ */ ++ synchronize_rcu(); + + set->nr_hw_queues = nr_hw_queues; + list_for_each_entry(q, &set->tag_list, tag_set_list) { +@@ -2346,10 +2350,6 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_unfreeze_queue(q); +- /* +- * Sync with blk_mq_queue_tag_busy_iter. +- */ +- synchronize_rcu(); + } + EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); + +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 9ec4618df533..318bdfb8703c 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -793,8 +793,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + + ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, + "acpi_cppc"); +- if (ret) ++ if (ret) { ++ kobject_put(&cpc_ptr->kobj); + goto out_free; ++ } + + kfree(output.pointer); + return 0; +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index 245bcdb44c64..442aac84ab88 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -171,7 +171,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; +- /* If _PR3 is not available, use D3hot as the target state. */ ++ /* If D3cold is not supported, use D3hot as the target state. 
*/ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { +diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c +index 46f060356a22..339e6d3dba7c 100644 +--- a/drivers/acpi/evged.c ++++ b/drivers/acpi/evged.c +@@ -82,6 +82,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + struct resource r; + struct acpi_resource_irq *p = &ares->data.irq; + struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; ++ char ev_name[5]; ++ u8 trigger; + + if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) + return AE_OK; +@@ -90,14 +92,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + dev_err(dev, "unable to parse IRQ resource\n"); + return AE_ERROR; + } +- if (ares->type == ACPI_RESOURCE_TYPE_IRQ) ++ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { + gsi = p->interrupts[0]; +- else ++ trigger = p->triggering; ++ } else { + gsi = pext->interrupts[0]; ++ trigger = pext->triggering; ++ } + + irq = r.start; + +- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { ++ switch (gsi) { ++ case 0 ... 255: ++ sprintf(ev_name, "_%c%02hhX", ++ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi); ++ ++ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) ++ break; ++ /* fall through */ ++ default: ++ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) ++ break; ++ + dev_err(dev, "cannot locate _EVT method\n"); + return AE_ERROR; + } +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 0792ec5a9efc..9a7f017dda47 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -927,12 +927,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) + + if (buffer.length && package + && package->type == ACPI_TYPE_PACKAGE +- && package->package.count) { +- int err = acpi_extract_power_resources(package, 0, +- &ps->resources); +- if (!err) +- device->power.flags.power_resources = 1; +- } ++ && package->package.count) ++ acpi_extract_power_resources(package, 0, &ps->resources); ++ + ACPI_FREE(buffer.pointer); + } + +@@ -979,14 +976,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) + acpi_bus_init_power_state(device, i); + + INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); +- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) +- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + +- /* Set defaults for D0 and D3hot states (always valid) */ ++ /* Set the defaults for D0 and D3hot (always supported). */ + device->power.states[ACPI_STATE_D0].flags.valid = 1; + device->power.states[ACPI_STATE_D0].power = 100; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + ++ /* ++ * Use power resources only if the D0 list of them is populated, because ++ * some platforms may provide _PR3 only to indicate D3cold support and ++ * in those cases the power resources list returned by it may be bogus. ++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { ++ device->power.flags.power_resources = 1; ++ /* ++ * D3cold is supported if the D3hot list of power resources is ++ * not empty. 
++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) ++ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; ++ } ++ + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + } +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index a36d0739dbfe..7502441b1400 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -898,8 +898,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, + + error = kobject_init_and_add(&hotplug->kobj, + &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); +- if (error) ++ if (error) { ++ kobject_put(&hotplug->kobj); + goto err_out; ++ } + + kobject_uevent(&hotplug->kobj, KOBJ_ADD); + return; +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index a7cc5b7be598..871e7f4994e8 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -845,6 +845,7 @@ void intel_gtt_insert_page(dma_addr_t addr, + unsigned int flags) + { + intel_private.driver->write_entry(addr, pg, flags); ++ readl(intel_private.gtt + pg); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -870,7 +871,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, + j++; + } + } +- wmb(); ++ readl(intel_private.gtt + j - 1); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -1104,6 +1105,7 @@ static void i9xx_cleanup(void) + + static void i9xx_chipset_flush(void) + { ++ wmb(); + if (intel_private.i9xx_flush_page) + writel(1, intel_private.i9xx_flush_page); + } +diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c +index aee6c0d39a7c..024e6cc5025b 100644 +--- a/drivers/clocksource/dw_apb_timer_of.c ++++ b/drivers/clocksource/dw_apb_timer_of.c +@@ -146,10 +146,6 @@ static int num_called; + static int __init dw_apb_timer_init(struct device_node *timer) + { + switch (num_called) { +- case 0: +- pr_debug("%s: found clockevent timer\n", __func__); +- add_clockevent(timer); +- break; + case 1: + pr_debug("%s: found clocksource timer\n", __func__); + add_clocksource(timer); +@@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) + #endif + break; + default: ++ pr_debug("%s: found clockevent timer\n", __func__); ++ add_clockevent(timer); + break; + } + +diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c +index 9e98a5fbbc1d..e7e92ed34f0c 100644 +--- a/drivers/cpuidle/sysfs.c ++++ b/drivers/cpuidle/sysfs.c +@@ -412,7 +412,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) + ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, + &kdev->kobj, "state%d", i); + if (ret) { +- kfree(kobj); ++ kobject_put(&kobj->kobj); + goto error_state; + } + kobject_uevent(&kobj->kobj, KOBJ_ADD); +@@ -542,7 +542,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) + ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, + &kdev->kobj, "driver"); + if (ret) { +- kfree(kdrv); ++ kobject_put(&kdrv->kobj); + return ret; + } + +@@ -636,7 +636,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) + error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, + "cpuidle"); + if (error) { +- kfree(kdev); ++ kobject_put(&kdev->kobj); + return error; + } + +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index 8b383d3d21c2..059c2d4ad18f 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -2636,7 +2636,6 @@ static struct talitos_alg_template 
driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +- .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | +@@ -2670,6 +2669,7 @@ static struct talitos_alg_template driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, + } + }, +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 1c65f5ac4368..6529addd1e82 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + NULL, "%s", short_name); + kfree(short_name); +- if (ret) ++ if (ret) { ++ kobject_put(&new_var->kobj); + return ret; ++ } + + kobject_uevent(&new_var->kobj, KOBJ_ADD); + if (efivar_entry_add(new_var, &efivar_sysfs_list)) { +diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c +index 96d16fca68b2..24e7152cd2bf 100644 +--- a/drivers/macintosh/windfarm_pm112.c ++++ b/drivers/macintosh/windfarm_pm112.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -133,14 +134,6 @@ static int create_cpu_loop(int cpu) + s32 tmax; + int fmin; + +- /* Get PID params from the appropriate SAT */ +- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); +- if (hdr == NULL) { +- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); +- return -EINVAL; +- } +- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; +- + /* Get FVT params to get Tmax; if not found, assume default */ + hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); + if (hdr) { +@@ -153,6 +146,16 @@ static int create_cpu_loop(int cpu) + if (tmax < cpu_all_tmax) + cpu_all_tmax = tmax; + ++ kfree(hdr); ++ ++ /* Get PID params from the appropriate SAT */ ++ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); ++ if (hdr == NULL) { ++ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); ++ return -EINVAL; ++ } ++ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; ++ + /* + * Darwin has a minimum fan speed of 1000 rpm for the 4-way and + * 515 for the 2-way. 
That appears to be overkill, so for now, +@@ -175,6 +178,9 @@ static int create_cpu_loop(int cpu) + pid.min = fmin; + + wf_cpu_pid_init(&cpu_pid[cpu], &pid); ++ ++ kfree(hdr); ++ + return 0; + } + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index da8708b65356..3485d2a79600 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -7101,7 +7101,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) + */ + mddev_put(mddev); + /* Wait until bdev->bd_disk is definitely gone */ +- flush_workqueue(md_misc_wq); ++ if (work_pending(&mddev->del_work)) ++ flush_workqueue(md_misc_wq); + /* Then retry the open from the top */ + return -ERESTARTSYS; + } +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 2f054db8807b..372057cabea4 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -629,7 +629,7 @@ static int dvb_frontend_thread(void *data) + struct dvb_frontend *fe = data; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct dvb_frontend_private *fepriv = fe->frontend_priv; +- enum fe_status s; ++ enum fe_status s = FE_NONE; + enum dvbfe_algo algo; + bool re_tune = false; + bool semheld = false; +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index f3a3f31cdfa9..8e9c3bd36d03 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -12,6 +12,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -24,6 +25,7 @@ + struct rcar_fcp_device { + struct list_head list; + struct device *dev; ++ struct device_dma_parameters dma_parms; + }; + + static LIST_HEAD(fcp_devices); +@@ -140,6 +142,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) + + fcp->dev = &pdev->dev; + ++ fcp->dev->dma_parms = &fcp->dma_parms; ++ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); ++ + pm_runtime_enable(&pdev->dev); + + mutex_lock(&fcp_lock); +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c +index 57b250847cd3..72a47da0db2a 100644 +--- a/drivers/media/tuners/si2157.c ++++ b/drivers/media/tuners/si2157.c +@@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe) + struct si2157_cmd cmd; + const struct firmware *fw; + const char *fw_name; +- unsigned int uitmp, chip_id; ++ unsigned int chip_id, xtal_trim; + + dev_dbg(&client->dev, "\n"); + +- /* Returned IF frequency is garbage when firmware is not running */ +- memcpy(cmd.args, "\x15\x00\x06\x07", 4); ++ /* Try to get Xtal trim property, to verify tuner still running */ ++ memcpy(cmd.args, "\x15\x00\x04\x02", 4); + cmd.wlen = 4; + cmd.rlen = 4; + ret = si2157_cmd_execute(client, &cmd); +- if (ret) +- goto err; + +- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; +- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); ++ xtal_trim = cmd.args[2] | (cmd.args[3] << 8); + +- if (uitmp == dev->if_frequency / 1000) ++ if (ret == 0 && xtal_trim < 16) + goto warm; + ++ dev->if_frequency = 0; /* we no longer know current tuner state */ ++ + /* power up */ + if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { + memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); +diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c +index a0057641cc86..c55180912c3a 100644 +--- a/drivers/media/usb/dvb-usb/dibusb-mb.c ++++ b/drivers/media/usb/dvb-usb/dibusb-mb.c +@@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) + + if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { + 
err("tuner i2c write failed."); +- ret = -EREMOTEIO; ++ return -EREMOTEIO; + } + + if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) +diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c +index 070871fb1fc4..6e1a85ff3fff 100644 +--- a/drivers/media/usb/go7007/snd-go7007.c ++++ b/drivers/media/usb/go7007/snd-go7007.c +@@ -243,22 +243,18 @@ int go7007_snd_init(struct go7007 *go) + gosnd->capturing = 0; + ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, + &gosnd->card); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_snd; ++ + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, + &go7007_snd_device_ops); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->longname, gosnd->card->shortname, +@@ -269,11 +265,8 @@ int go7007_snd_init(struct go7007 *go) + &go7007_snd_capture_ops); + + ret = snd_card_register(gosnd->card); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; + + gosnd->substream = NULL; + go->snd_context = gosnd; +@@ -281,6 +274,12 @@ int go7007_snd_init(struct go7007 *go) + ++dev; + + return 0; ++ ++free_card: ++ snd_card_free(gosnd->card); ++free_snd: ++ kfree(gosnd); ++ return ret; + } + EXPORT_SYMBOL(go7007_snd_init); + +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index 445fc47dc3e7..b4336534f628 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -79,7 +79,7 @@ + #define ESDHC_STD_TUNING_EN (1 << 24) + /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ + #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +-#define ESDHC_TUNING_START_TAP_MASK 0xff ++#define ESDHC_TUNING_START_TAP_MASK 0x7f + #define ESDHC_TUNING_STEP_MASK 0x00070000 + #define ESDHC_TUNING_STEP_SHIFT 16 + +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c +index ef9a6b22c9fa..1291492a1cef 100644 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c +@@ -911,11 +911,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, + if (!section) { + /* + * Small-page NAND use byte 6 for BBI while large-page +- * NAND use byte 0. ++ * NAND use bytes 0 and 1. 
+ */ +- if (cfg->page_size > 512) +- oobregion->offset++; +- oobregion->length--; ++ if (cfg->page_size > 512) { ++ oobregion->offset += 2; ++ oobregion->length -= 2; ++ } else { ++ oobregion->length--; ++ } + } + } + +diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c +index 5de7591b0510..80c98eef44d9 100644 +--- a/drivers/mtd/nand/pasemi_nand.c ++++ b/drivers/mtd/nand/pasemi_nand.c +@@ -164,7 +164,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { + dev_err(dev, "Unable to register MTD device\n"); + err = -ENODEV; +- goto out_lpc; ++ goto out_cleanup_nand; + } + + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, +@@ -172,6 +172,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + + return 0; + ++ out_cleanup_nand: ++ nand_cleanup(chip); + out_lpc: + release_region(lpcctl, 4); + out_ior: +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c +index 3a75352f632b..792a1afabf5d 100644 +--- a/drivers/net/can/usb/kvaser_usb.c ++++ b/drivers/net/can/usb/kvaser_usb.c +@@ -791,7 +791,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, + if (!urb) + return -ENOMEM; + +- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); ++ buf = kzalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; +@@ -1459,7 +1459,7 @@ static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +@@ -1592,7 +1592,7 @@ static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c +index 6ffdff68bfc4..672a8212c8d9 100644 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -412,7 +412,7 @@ static void emac_timeout(struct net_device *dev) + /* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; +@@ -420,7 +420,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + + channel = db->tx_fifo_stat & 3; + if (channel == 3) +- return 1; ++ return NETDEV_TX_BUSY; + + channel = (channel == 1 ? 
1 : 0); + +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index 905911f78693..e95f19e573a7 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -2096,6 +2096,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + rss->hash_key; + int rc; + ++ if (unlikely(!func)) ++ return -EINVAL; ++ + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, +@@ -2108,8 +2111,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + if (rss->hash_func) + rss->hash_func--; + +- if (func) +- *func = rss->hash_func; ++ *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 897a87ae8655..20f7ab4aa2f1 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -3362,12 +3362,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } +- dev_info(dev, "Partner protocol version is %d\n", +- crq->version_exchange_rsp.version); +- if (be16_to_cpu(crq->version_exchange_rsp.version) < +- ibmvnic_version) +- ibmvnic_version = ++ ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); ++ dev_info(dev, "Partner protocol version is %d\n", ++ ibmvnic_version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c +index 39a09e18c1b7..3b16ee0de246 100644 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c +@@ -3167,8 +3167,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { ++ case e1000_82544: { + unsigned int pull_size; +- case e1000_82544: ++ + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements +@@ -3189,6 +3190,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + } + len = skb_headlen(skb); + break; ++ } + default: + /* do nothing */ + break; +diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h +index 879cca47b021..62675938cb59 100644 +--- a/drivers/net/ethernet/intel/e1000e/e1000.h ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h +@@ -589,7 +589,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) + + #define er32(reg) __er32(hw, E1000_##reg) + +-s32 __ew32_prepare(struct e1000_hw *hw); + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + + #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index a0f97c5ab6ef..be324b4761eb 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -136,14 +136,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +-s32 __ew32_prepare(struct e1000_hw *hw) ++static void __ew32_prepare(struct e1000_hw *hw) + { + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); +- +- return i; + } + + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +@@ -624,11 +622,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) + { + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, rx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { ++ if (unlikely(i != readl(rx_ring->tail))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); +@@ -641,11 +639,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) + { + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, tx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { ++ if (unlikely(i != readl(tx_ring->tail))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index 737b664d004c..b02e262ed76a 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -153,7 +153,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) + u32 status; + u32 speed; + +- status = rd32(E1000_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? ++ 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 0d2baec546e1..c17135b7fca7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -2219,7 +2219,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Configure pause time (2 TCs per register) */ +- reg = hw->fc.pause_time * 0x00010001; ++ reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + +diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c +index 9fcaf1910633..9b98ec3dcb82 100644 +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -845,7 +845,8 @@ static int lpc_mii_init(struct netdata_local *pldat) + if (mdiobus_register(pldat->mii_bus)) + goto err_out_unregister_bus; + +- if (lpc_mii_probe(pldat->ndev) != 0) ++ err = lpc_mii_probe(pldat->ndev); ++ if (err) + goto err_out_unregister_bus; + + return 0; +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 4f582ce929f2..9dda2dc6b5e7 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -421,6 +421,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + int ret; + rx_handler_result_t handle_res; + ++ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ ++ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) ++ return RX_HANDLER_PASS; ++ + port = macvlan_port_get_rcu(skb->dev); + if (is_multicast_ether_addr(eth->h_dest)) { + unsigned int hash; +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index 
aabc6ef366b4..d63b83605748 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -691,6 +691,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; ++ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) ++ return 0; + while (n--) + p[n] = rssConf->indTable[n]; + return 0; +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 58ddb6c90418..b1470d30d079 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1521,6 +1521,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, + daddr = eth_hdr(request)->h_source; + ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { ++ if (!ns->opt[i + 1]) { ++ kfree_skb(reply); ++ return NULL; ++ } + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c +index b5e12be73f2b..e27acccc3678 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c +@@ -610,6 +610,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, + hif_dev->remain_skb = nskb; + spin_unlock(&hif_dev->rx_lock); + } else { ++ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { ++ dev_err(&hif_dev->udev->dev, ++ "ath9k_htc: over RX MAX_PKT_NUM\n"); ++ goto err; ++ } + nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); + if (!nskb) { + dev_err(&hif_dev->udev->dev, +@@ -636,9 +641,9 @@ err: + + static void ath9k_hif_usb_rx_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + int ret; + + if (!skb) +@@ -678,14 +683,15 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + } + + static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + struct sk_buff *nskb; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int ret; + + if (!skb) +@@ -743,6 +749,7 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + urb->context = NULL; + } + +@@ -788,7 +795,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) + init_usb_anchor(&hif_dev->mgmt_submitted); + + for (i = 0; i < MAX_TX_URB_NUM; i++) { +- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); ++ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); + if (!tx_buf) + goto err; + +@@ -825,8 +832,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->rx_submitted); +@@ -834,6 +842,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + + for (i = 0; i < MAX_RX_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = 
usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -848,11 +862,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_bulk_urb(urb, hif_dev->udev, + usb_rcvbulkpipe(hif_dev->udev, + USB_WLAN_RX_PIPE), + skb->data, MAX_RX_BUF_SIZE, +- ath9k_hif_usb_rx_cb, skb); ++ ath9k_hif_usb_rx_cb, rx_buf); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->rx_submitted); +@@ -878,6 +895,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_rx_urbs(hif_dev); + return ret; + } +@@ -889,14 +908,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->reg_in_submitted); + + for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -911,11 +937,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, + USB_REG_IN_PIPE), + skb->data, MAX_REG_IN_BUF_SIZE, +- ath9k_hif_usb_reg_in_cb, skb, 1); ++ ath9k_hif_usb_reg_in_cb, rx_buf, 1); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->reg_in_submitted); +@@ -941,6 +970,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); + return ret; + } +@@ -971,7 +1002,7 @@ err: + return -ENOMEM; + } + +-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) + { + usb_kill_anchored_urbs(&hif_dev->regout_submitted); + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); +@@ -1338,8 +1369,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) + + if (hif_dev->flags & HIF_USB_READY) { + ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); +- ath9k_htc_hw_free(hif_dev->htc_handle); + ath9k_hif_usb_dev_deinit(hif_dev); ++ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); ++ ath9k_htc_hw_free(hif_dev->htc_handle); + } + + usb_set_intfdata(interface, NULL); +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h +index 7c2ef7ecd98b..835264c36595 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.h ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h +@@ -84,6 +84,11 @@ struct tx_buf { + struct list_head list; + }; + ++struct rx_buf { ++ struct sk_buff *skb; ++ struct hif_device_usb *hif_dev; ++}; ++ + #define HIF_USB_TX_STOP BIT(0) + #define HIF_USB_TX_FLUSH BIT(1) + +@@ -131,5 +136,6 @@ struct hif_device_usb { + + int ath9k_hif_usb_init(void); + void ath9k_hif_usb_exit(void); ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); + + #endif /* HTC_USB_H */ +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +index b65c1b661ade..15a0036dcc6e 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +@@ -931,8 +931,9 @@ err_init: + int ath9k_htc_probe_device(struct 
htc_target *htc_handle, struct device *dev, + u16 devid, char *product, u32 drv_info) + { +- struct ieee80211_hw *hw; ++ struct hif_device_usb *hif_dev; + struct ath9k_htc_priv *priv; ++ struct ieee80211_hw *hw; + int ret; + + hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); +@@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + return 0; + + err_init: +- ath9k_deinit_wmi(priv); ++ ath9k_stop_wmi(priv); ++ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; ++ ath9k_hif_usb_dealloc_urbs(hif_dev); ++ ath9k_destoy_wmi(priv); + err_free: + ieee80211_free_hw(hw); + return ret; +@@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) + htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; + + ath9k_deinit_device(htc_handle->drv_priv); +- ath9k_deinit_wmi(htc_handle->drv_priv); ++ ath9k_stop_wmi(htc_handle->drv_priv); + ieee80211_free_hw(htc_handle->drv_priv->hw); + } + } +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +index 52b42ecee621..2eb169b204f8 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +@@ -998,9 +998,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { +- ath_warn(common, +- "Short RX data len, dropping (dlen: %d)\n", +- rs_datalen); ++ ath_dbg(common, ANY, ++ "Short RX data len, dropping (dlen: %d)\n", ++ rs_datalen); + goto rx_next; + } + +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c +index fd85f996c554..257b6ee51e54 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c +@@ -114,6 +114,9 @@ static void htc_process_conn_rsp(struct htc_target *target, + + if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { + epid = svc_rspmsg->endpoint_id; ++ if (epid < 0 || epid >= ENDPOINT_MAX) ++ return; ++ + service_id = be16_to_cpu(svc_rspmsg->service_id); + max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); + endpoint = &target->endpoint[epid]; +diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c +index 9c16e2a6d185..8f14897ae5a3 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.c ++++ b/drivers/net/wireless/ath/ath9k/wmi.c +@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) + return wmi; + } + +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv) + { + struct wmi *wmi = priv->wmi; + + mutex_lock(&wmi->op_mutex); + wmi->stopped = true; + mutex_unlock(&wmi->op_mutex); ++} + ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) ++{ + kfree(priv->wmi); + } + +diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h +index 380175d5ecd7..d8b912206232 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.h ++++ b/drivers/net/wireless/ath/ath9k/wmi.h +@@ -179,7 +179,6 @@ struct wmi { + }; + + struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); + int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, + enum htc_endpoint_id *wmi_ctrl_epid); + int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, +@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + void ath9k_wmi_event_tasklet(unsigned 
long data); + void ath9k_fatal_work(struct work_struct *work); + void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv); ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); + + #define WMI_CMD(_wmi_cmd) \ + do { \ +diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c +index 88045f93a76c..62ed0977f32c 100644 +--- a/drivers/net/wireless/ath/carl9170/fw.c ++++ b/drivers/net/wireless/ath/carl9170/fw.c +@@ -351,9 +351,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + + if (SUPP(CARL9170FW_WLANTX_CAB)) { +- if_comb_types |= +- BIT(NL80211_IFTYPE_AP) | +- BIT(NL80211_IFTYPE_P2P_GO); ++ if_comb_types |= BIT(NL80211_IFTYPE_AP); + + #ifdef CONFIG_MAC80211_MESH + if_comb_types |= +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c +index ffb22a04beeb..202f421e0e37 100644 +--- a/drivers/net/wireless/ath/carl9170/main.c ++++ b/drivers/net/wireless/ath/carl9170/main.c +@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, + ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && + (vif->type != NL80211_IFTYPE_AP)); + +- /* While the driver supports HW offload in a single +- * P2P client configuration, it doesn't support HW +- * offload in the favourit, concurrent P2P GO+CLIENT +- * configuration. Hence, HW offload will always be +- * disabled for P2P. ++ /* The driver used to have P2P GO+CLIENT support, ++ * but since this was dropped and we don't know if ++ * there are any gremlins lurking in the shadows, ++ * so best we keep HW offload disabled for P2P. + */ + ar->disable_offload |= vif->p2p; + +@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, + if (vif->type == NL80211_IFTYPE_STATION) + break; + +- /* P2P GO [master] use-case +- * Because the P2P GO station is selected dynamically +- * by all participating peers of a WIFI Direct network, +- * the driver has be able to change the main interface +- * operating mode on the fly. 
+- */ +- if (main_vif->p2p && vif->p2p && +- vif->type == NL80211_IFTYPE_AP) { +- old_main = main_vif; +- break; +- } +- + err = -EBUSY; + rcu_read_unlock(); + +diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c +index a635fc6b1722..e57a50cc1d87 100644 +--- a/drivers/net/wireless/broadcom/b43/main.c ++++ b/drivers/net/wireless/broadcom/b43/main.c +@@ -5596,7 +5596,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); +- ++ ieee80211_hw_set(hw, MFP_CAPABLE); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c +index 9da8bd792702..fe658a9b53cb 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/main.c ++++ b/drivers/net/wireless/broadcom/b43legacy/main.c +@@ -3835,6 +3835,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); ++ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c +index 35ccf400b02c..87045e30e585 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c ++++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c +@@ -571,6 +571,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); ++ goto drop; + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 94901b0041ce..c597af69f48f 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1446,7 +1446,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) + { + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); +- static struct mwifiex_sta_node *node; ++ struct mwifiex_sta_node *node; ++ int i; + + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && + priv->media_connected && idx == 0) { +@@ -1456,13 +1457,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, + HostCmd_ACT_GEN_GET, 0, NULL, true); + +- if (node && (&node->list == &priv->sta_list)) { +- node = NULL; +- return -ENOENT; +- } +- +- node = list_prepare_entry(node, &priv->sta_list, list); +- list_for_each_entry_continue(node, &priv->sta_list, list) { ++ i = 0; ++ list_for_each_entry(node, &priv->sta_list, list) { ++ if (i++ != idx) ++ continue; + ether_addr_copy(mac, node->mac_addr); + return mwifiex_dump_station_info(priv, node, sinfo); + } +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c +index 1f02461de261..93b22a5b6878 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.c ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c +@@ -927,10 +927,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, + + WARN_ON(NULL == skb); + _urb = usb_alloc_urb(0, GFP_ATOMIC); +- if (!_urb) { +- kfree_skb(skb); ++ if (!_urb) + return NULL; +- } + _rtl_install_trx_info(rtlusb, skb, ep_num); + 
usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, + ep_num), skb->data, skb->len, _rtl_tx_complete, skb); +@@ -945,7 +943,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + u32 ep_num; + struct urb *_urb = NULL; +- struct sk_buff *_skb = NULL; + + WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); + if (unlikely(IS_USB_STOP(rtlusb))) { +@@ -955,8 +952,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + return; + } + ep_num = rtlusb->ep_map.ep_mapping[qnum]; +- _skb = skb; +- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); ++ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); + if (unlikely(!_urb)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't allocate urb. Drop skb!\n"); +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 16611cf3aba4..19658873b4c1 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1251,7 +1251,7 @@ int pci_setup_device(struct pci_dev *dev) + /* device class may be changed after fixup */ + class = dev->class >> 8; + +- if (dev->non_compliant_bars) { ++ if (dev->non_compliant_bars && !dev->mmio_always_on) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { + dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); +@@ -1360,13 +1360,33 @@ static void pci_configure_mps(struct pci_dev *dev) + struct pci_dev *bridge = pci_upstream_bridge(dev); + int mps, p_mps, rc; + +- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) ++ if (!pci_is_pcie(dev)) + return; + + /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ + if (dev->is_virtfn) + return; + ++ /* ++ * For Root Complex Integrated Endpoints, program the maximum ++ * supported value unless limited by the PCIE_BUS_PEER2PEER case. 
++ */ ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { ++ if (pcie_bus_config == PCIE_BUS_PEER2PEER) ++ mps = 128; ++ else ++ mps = 128 << dev->pcie_mpss; ++ rc = pcie_set_mps(dev, mps); ++ if (rc) { ++ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", ++ mps); ++ } ++ return; ++ } ++ ++ if (!bridge || !pci_is_pcie(bridge)) ++ return; ++ + mps = pcie_get_mps(dev); + p_mps = pcie_get_mps(bridge); + +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c +index e8aee6d88a40..6a23136bc813 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c +@@ -289,6 +289,7 @@ struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; ++ u32 eint_mask; + }; + + /* +@@ -585,10 +586,13 @@ static void exynos_pinctrl_suspend_bank( + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ save->eint_mask = readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); ++ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); + } + + static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +@@ -617,6 +621,9 @@ static void exynos_pinctrl_resume_bank( + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), save->eint_fltcon1); ++ pr_debug("%s: mask %#010x => %#010x\n", bank->name, ++ readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); +@@ -624,6 +631,8 @@ static void exynos_pinctrl_resume_bank( + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ writel(save->eint_mask, regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + } + + static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c +index e9e749f87517..8fb43c4438e6 100644 +--- a/drivers/power/reset/vexpress-poweroff.c ++++ b/drivers/power/reset/vexpress-poweroff.c +@@ -150,6 +150,7 @@ static struct platform_driver vexpress_reset_driver = { + .driver = { + .name = "vexpress-reset", + .of_match_table = vexpress_reset_of_match, ++ .suppress_bind_attrs = true, + }, + }; + +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index c7b770075caa..80341863caa5 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1029,10 +1029,10 @@ int scsi_init_io(struct scsi_cmnd *cmd) + struct scsi_device *sdev = cmd->device; + struct request *rq = cmd->request; + bool is_mq = (rq->mq_ctx != NULL); +- int error; ++ int error = BLKPREP_KILL; + + if (WARN_ON_ONCE(!rq->nr_phys_segments)) +- return -EINVAL; ++ goto err_exit; + + error = scsi_init_sgtable(rq, &cmd->sdb); + if (error) +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c +index 63231760facc..1906b2319e5b 100644 +--- a/drivers/spi/spi-bcm-qspi.c ++++ b/drivers/spi/spi-bcm-qspi.c +@@ -698,7 +698,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + if (buf) + buf[tp.byte] = read_rxram_slot_u8(qspi, slot); + 
dev_dbg(&qspi->pdev->dev, "RD %02x\n", +- buf ? buf[tp.byte] : 0xff); ++ buf ? buf[tp.byte] : 0x0); + } else { + u16 *buf = tp.trans->rx_buf; + +@@ -706,7 +706,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + buf[tp.byte / 2] = read_rxram_slot_u16(qspi, + slot); + dev_dbg(&qspi->pdev->dev, "RD %04x\n", +- buf ? buf[tp.byte] : 0xffff); ++ buf ? buf[tp.byte / 2] : 0x0); + } + + update_qspi_trans_byte_count(qspi, &tp, +@@ -761,13 +761,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) + while (!tstatus && slot < MSPI_NUM_CDRAM) { + if (tp.trans->bits_per_word <= 8) { + const u8 *buf = tp.trans->tx_buf; +- u8 val = buf ? buf[tp.byte] : 0xff; ++ u8 val = buf ? buf[tp.byte] : 0x00; + + write_txram_slot_u8(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); + } else { + const u16 *buf = tp.trans->tx_buf; +- u16 val = buf ? buf[tp.byte / 2] : 0xffff; ++ u16 val = buf ? buf[tp.byte / 2] : 0x0000; + + write_txram_slot_u16(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c +index eab27d41ba83..df6abc75bc16 100644 +--- a/drivers/spi/spi-bcm2835.c ++++ b/drivers/spi/spi-bcm2835.c +@@ -793,7 +793,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -813,6 +813,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + /* Clear FIFOs, and disable the HW block */ + bcm2835_wr(bs, BCM2835_SPI_CS, + BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c +index e075712c501e..b7f78e6d9bec 100644 +--- a/drivers/spi/spi-bcm2835aux.c ++++ b/drivers/spi/spi-bcm2835aux.c +@@ -485,7 +485,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -505,6 +505,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835aux_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + bcm2835aux_spi_reset_hw(bs); + + /* disable the HW block by releasing the clock */ +diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c +index e31971f91475..c079ab36275f 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, + if (!xfer->tx_buf) + return NULL; + ++ memset(&txconf, 0, sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = 16; +@@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + if (!xfer->rx_buf) + return NULL; + ++ memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = 16; +@@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi 
*dws, + + static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) + { +- u16 dma_ctrl = 0; ++ u16 imr = 0, dma_ctrl = 0; + + dw_writel(dws, DW_SPI_DMARDLR, 0xf); + dw_writel(dws, DW_SPI_DMATDLR, 0x10); + +- if (xfer->tx_buf) ++ if (xfer->tx_buf) { + dma_ctrl |= SPI_DMA_TDMAE; +- if (xfer->rx_buf) ++ imr |= SPI_INT_TXOI; ++ } ++ if (xfer->rx_buf) { + dma_ctrl |= SPI_DMA_RDMAE; ++ imr |= SPI_INT_RXUI | SPI_INT_RXOI; ++ } + dw_writel(dws, DW_SPI_DMACR, dma_ctrl); + + /* Set the interrupt mask */ +- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); ++ spi_umask_intr(dws, imr); + + dws->transfer_handler = dma_transfer; + +@@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) + dma_async_issue_pending(dws->txchan); + } + +- return 0; ++ return 1; + } + + static void mid_spi_dma_stop(struct dw_spi *dws) +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index babf0a337e96..91f44e3e1930 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -384,11 +384,8 @@ static int dw_spi_transfer_one(struct spi_master *master, + + spi_enable_chip(dws, 1); + +- if (dws->dma_mapped) { +- ret = dws->dma_ops->dma_transfer(dws, transfer); +- if (ret < 0) +- return ret; +- } ++ if (dws->dma_mapped) ++ return dws->dma_ops->dma_transfer(dws, transfer); + + if (chip->poll_mode) + return poll_transfer(dws); +@@ -500,6 +497,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); + spin_lock_init(&dws->buf_lock); + ++ spi_master_set_devdata(master, dws); ++ + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); +@@ -531,8 +530,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + } + } + +- spi_master_set_devdata(master, dws); +- ret = devm_spi_register_master(dev, master); ++ ret = spi_register_master(master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_dma_exit; +@@ -556,6 +554,8 @@ void dw_spi_remove_host(struct dw_spi *dws) + { + dw_spi_debugfs_remove(dws); + ++ spi_unregister_master(dws->master); ++ + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); + +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 2f84d7653afd..da3834fe5e57 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1774,7 +1774,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); +- status = devm_spi_register_master(&pdev->dev, master); ++ status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_clock_enabled; +@@ -1804,6 +1804,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) + + pm_runtime_get_sync(&pdev->dev); + ++ spi_unregister_master(drv_data->master); ++ + /* Disable the SSP at the peripheral and SOC level */ + pxa2xx_spi_write(drv_data, SSCR0, 0); + clk_disable_unprepare(ssp->clk); +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index d74d341f9890..3fadc564d781 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -2025,18 +2025,17 @@ static int __unregister(struct device *dev, void *null) + */ + void spi_unregister_master(struct spi_master *master) + { +- int dummy; +- + if (master->queued) { + if (spi_destroy_queue(master)) + dev_err(&master->dev, "queue remove failed\n"); + } + 
++ device_for_each_child(&master->dev, NULL, __unregister); ++ + mutex_lock(&board_lock); + list_del(&master->list); + mutex_unlock(&board_lock); + +- dummy = device_for_each_child(&master->dev, NULL, __unregister); + device_unregister(&master->dev); + } + EXPORT_SYMBOL_GPL(spi_unregister_master); +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c +index c2a7cb95725b..4fc5de13582d 100644 +--- a/drivers/staging/android/ion/ion_heap.c ++++ b/drivers/staging/android/ion/ion_heap.c +@@ -105,12 +105,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + + static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) + { +- void *addr = vm_map_ram(pages, num, -1, pgprot); ++ void *addr = vmap(pages, num, VM_MAP, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); +- vm_unmap_ram(addr, num); ++ vunmap(addr); + + return 0; + } +diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c +index 5649ef1e379d..82a1c2cf6687 100644 +--- a/drivers/staging/greybus/sdio.c ++++ b/drivers/staging/greybus/sdio.c +@@ -413,6 +413,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + struct gb_sdio_command_request request = {0}; + struct gb_sdio_command_response response; + struct mmc_data *data = host->mrq->data; ++ unsigned int timeout_ms; + u8 cmd_flags; + u8 cmd_type; + int i; +@@ -471,9 +472,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + request.data_blksz = cpu_to_le16(data->blksz); + } + +- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, +- &request, sizeof(request), &response, +- sizeof(response)); ++ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : ++ GB_OPERATION_TIMEOUT_DEFAULT; ++ ++ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, ++ &request, sizeof(request), &response, ++ sizeof(response), timeout_ms); + if (ret < 0) + goto out; + +diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c +index 10951c82f6ed..7bd4c27cfb14 100644 +--- a/drivers/video/fbdev/w100fb.c ++++ b/drivers/video/fbdev/w100fb.c +@@ -583,6 +583,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + memsize=par->mach->mem->size; + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); + vfree(par->saved_extmem); ++ par->saved_extmem = NULL; + } + if (par->saved_intmem) { + memsize=MEM_INT_SIZE; +@@ -591,6 +592,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + else + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); + vfree(par->saved_intmem); ++ par->saved_intmem = NULL; + } + } + +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c +index 86637fec4eaa..6bc6823f81fa 100644 +--- a/drivers/w1/masters/omap_hdq.c ++++ b/drivers/w1/masters/omap_hdq.c +@@ -204,7 +204,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + /* check irqstatus */ + if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { + dev_dbg(hdq_data->dev, "timeout waiting for" +- " TXCOMPLETE/RXCOMPLETE, %x", *status); ++ " TXCOMPLETE/RXCOMPLETE, %x\n", *status); + ret = -ETIMEDOUT; + goto out; + } +@@ -215,7 +215,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + OMAP_HDQ_FLAG_CLEAR, &tmp_status); + if (ret) { + dev_dbg(hdq_data->dev, "timeout waiting GO bit" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + } + 
+ out: +@@ -231,7 +231,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) + spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); + hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); + spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); +- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); ++ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); + + if (hdq_data->hdq_irqstatus & + (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE +@@ -339,7 +339,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + tmp_status = hdq_data->hdq_irqstatus; + /* check irqstatus */ + if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { +- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", ++ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", + tmp_status); + ret = -ETIMEDOUT; + goto out; +@@ -366,7 +366,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + &tmp_status); + if (ret) + dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + + out: + mutex_unlock(&hdq_data->hdq_mutex); +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index d0d571c47d33..4f919628137c 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -779,10 +779,12 @@ again: + nritems = btrfs_header_nritems(path->nodes[0]); + if (!nritems || (path->slots[0] >= nritems - 1)) { + ret = btrfs_next_leaf(root, path); +- if (ret == 1) ++ if (ret < 0) { ++ goto out; ++ } else if (ret > 0) { + found_next = 1; +- if (ret != 0) + goto insert; ++ } + slot = path->slots[0]; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 250c8403ec67..c425443c31fe 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -8494,7 +8494,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + btrfs_io_bio(bio)->logical = file_offset; +- atomic_inc(&dip->pending_bios); + + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { + nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len); +@@ -8560,7 +8559,8 @@ submit: + if (!ret) + return 0; + +- bio_put(bio); ++ if (bio != orig_bio) ++ bio_put(bio); + out_err: + dip->errors = 1; + /* +@@ -8607,7 +8607,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + io_bio->bi_private = dip; + dip->orig_bio = io_bio; + dip->dio_bio = dio_bio; +- atomic_set(&dip->pending_bios, 0); ++ atomic_set(&dip->pending_bios, 1); + btrfs_bio = btrfs_io_bio(io_bio); + btrfs_bio->logical = file_offset; + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index edfc7ba38b33..080b12d65b0c 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -35,6 +35,7 @@ + #include "btrfs_inode.h" + #include "transaction.h" + #include "compression.h" ++#include "xattr.h" + + /* + * Maximum number of references an extent can have in order for us to attempt to +@@ -4368,6 +4369,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + struct fs_path *p; + struct posix_acl_xattr_header dummy_acl; + ++ /* Capabilities are emitted by finish_inode_if_needed */ ++ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) ++ return 0; ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +@@ -4904,6 +4909,64 @@ static int send_extent_data(struct send_ctx *sctx, + return 0; + } + ++/* ++ * Search for a capability xattr related to sctx->cur_ino. 
If the capability is ++ * found, call send_set_xattr function to emit it. ++ * ++ * Return 0 if there isn't a capability, or when the capability was emitted ++ * successfully, or < 0 if an error occurred. ++ */ ++static int send_capabilities(struct send_ctx *sctx) ++{ ++ struct fs_path *fspath = NULL; ++ struct btrfs_path *path; ++ struct btrfs_dir_item *di; ++ struct extent_buffer *leaf; ++ unsigned long data_ptr; ++ char *buf = NULL; ++ int buf_len; ++ int ret = 0; ++ ++ path = alloc_path_for_send(); ++ if (!path) ++ return -ENOMEM; ++ ++ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, ++ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); ++ if (!di) { ++ /* There is no xattr for this inode */ ++ goto out; ++ } else if (IS_ERR(di)) { ++ ret = PTR_ERR(di); ++ goto out; ++ } ++ ++ leaf = path->nodes[0]; ++ buf_len = btrfs_dir_data_len(leaf, di); ++ ++ fspath = fs_path_alloc(); ++ buf = kmalloc(buf_len, GFP_KERNEL); ++ if (!fspath || !buf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); ++ if (ret < 0) ++ goto out; ++ ++ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); ++ read_extent_buffer(leaf, buf, data_ptr, buf_len); ++ ++ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, ++ strlen(XATTR_NAME_CAPS), buf, buf_len); ++out: ++ kfree(buf); ++ fs_path_free(fspath); ++ btrfs_free_path(path); ++ return ret; ++} ++ + static int clone_range(struct send_ctx *sctx, + struct clone_root *clone_root, + const u64 disk_byte, +@@ -5615,6 +5678,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) + goto out; + } + ++ ret = send_capabilities(sctx); ++ if (ret < 0) ++ goto out; ++ + /* + * If other directory inodes depended on our current directory + * inode's move/rename, now do their move/rename operations. +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h +index a284fb28944b..63291c265aa0 100644 +--- a/fs/ext4/ext4_extents.h ++++ b/fs/ext4/ext4_extents.h +@@ -169,10 +169,13 @@ struct ext4_ext_path { + (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) + #define EXT_LAST_INDEX(__hdr__) \ + (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) +-#define EXT_MAX_EXTENT(__hdr__) \ +- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++#define EXT_MAX_EXTENT(__hdr__) \ ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ ++ : 0) + #define EXT_MAX_INDEX(__hdr__) \ +- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++ ((le16_to_cpu((__hdr__)->eh_max)) ? 
\ ++ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) + + static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) + { +diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c +index 88effb1053c7..6dc0b89c7b55 100644 +--- a/fs/ext4/fsync.c ++++ b/fs/ext4/fsync.c +@@ -43,30 +43,28 @@ + */ + static int ext4_sync_parent(struct inode *inode) + { +- struct dentry *dentry = NULL; +- struct inode *next; ++ struct dentry *dentry, *next; + int ret = 0; + + if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) + return 0; +- inode = igrab(inode); ++ dentry = d_find_any_alias(inode); ++ if (!dentry) ++ return 0; + while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); +- dentry = d_find_any_alias(inode); +- if (!dentry) +- break; +- next = igrab(d_inode(dentry->d_parent)); ++ ++ next = dget_parent(dentry); + dput(dentry); +- if (!next) +- break; +- iput(inode); +- inode = next; ++ dentry = next; ++ inode = dentry->d_inode; ++ + /* + * The directory inode may have gone through rmdir by now. But + * the inode itself and its blocks are still allocated (we hold +- * a reference to the inode so it didn't go through +- * ext4_evict_inode()) and so we are safe to flush metadata +- * blocks and the inode. ++ * a reference to the inode via its dentry), so it didn't go ++ * through ext4_evict_inode()) and so we are safe to flush ++ * metadata blocks and the inode. + */ + ret = sync_mapping_buffers(inode->i_mapping); + if (ret) +@@ -75,7 +73,7 @@ static int ext4_sync_parent(struct inode *inode) + if (ret) + break; + } +- iput(inode); ++ dput(dentry); + return ret; + } + +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index f0387d040331..9af410142f78 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1512,6 +1512,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, + goto out; + } + ++ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { ++ if (!silent) ++ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); ++ goto out; ++ } ++ + error = 0; + + out: +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index 882e9d6830df..5a1a6dbbc55f 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -269,6 +269,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) + if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) + wb_put(wb); + } ++EXPORT_SYMBOL_GPL(__inode_attach_wb); + + /** + * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 36362d4bc344..a92af0ed0e28 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -2793,6 +2793,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) + if (!nilfs->ns_writer) + return -ENOMEM; + ++ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); ++ + err = nilfs_segctor_start_thread(nilfs->ns_writer); + if (err) { + kfree(nilfs->ns_writer); +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index 36795eed40b0..299dbf59f28f 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -56,7 +56,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + { + ssize_t list_size, size, value_size = 0; + char *buf, *name, *value = NULL; +- int uninitialized_var(error); ++ int error = 0; + size_t slen; + + if (!(old->d_inode->i_opflags & IOP_XATTR) || +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index c2afe39f0b9e..a28934912530 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -417,7 +417,7 
@@ const struct inode_operations proc_link_inode_operations = { + + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { +- struct inode *inode = new_inode_pseudo(sb); ++ struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_ino = de->low_ino; +diff --git a/fs/proc/self.c b/fs/proc/self.c +index 40245954c450..c8bbc1c84a39 100644 +--- a/fs/proc/self.c ++++ b/fs/proc/self.c +@@ -53,7 +53,7 @@ int proc_setup_self(struct super_block *s) + inode_lock(root_inode); + self = d_alloc_name(s->s_root, "self"); + if (self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c +index 595b90a9766c..02d1db8e9723 100644 +--- a/fs/proc/thread_self.c ++++ b/fs/proc/thread_self.c +@@ -55,7 +55,7 @@ int proc_setup_thread_self(struct super_block *s) + inode_lock(root_inode); + thread_self = d_alloc_name(s->s_root, "thread-self"); + if (thread_self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = thread_self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index e465bb15912d..6be5545d3584 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -317,7 +317,7 @@ extern void gdbstub_exit(int status); + extern int kgdb_single_step; + extern atomic_t kgdb_active; + #define in_dbg_master() \ +- (raw_smp_processor_id() == atomic_read(&kgdb_active)) ++ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) + extern bool dbg_is_early; + extern void __init dbg_late_init(void); + #else /* ! 
CONFIG_KGDB */ +diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h +index 68ec78c1aa48..bd8480ae82e9 100644 +--- a/include/linux/sunrpc/gss_api.h ++++ b/include/linux/sunrpc/gss_api.h +@@ -82,6 +82,7 @@ struct pf_desc { + u32 service; + char *name; + char *auth_domain_name; ++ struct auth_domain *domain; + bool datatouch; + }; + +diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h +index 726aff1a5201..213fa12f56fc 100644 +--- a/include/linux/sunrpc/svcauth_gss.h ++++ b/include/linux/sunrpc/svcauth_gss.h +@@ -20,7 +20,8 @@ int gss_svc_init(void); + void gss_svc_shutdown(void); + int gss_svc_init_net(struct net *net); + void gss_svc_shutdown_net(struct net *net); +-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); ++struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, ++ char *name); + u32 svcauth_gss_flavor(struct auth_domain *dom); + + #endif /* __KERNEL__ */ +diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h +index 00a20cd21ee2..afc3972b0879 100644 +--- a/include/uapi/linux/dvb/frontend.h ++++ b/include/uapi/linux/dvb/frontend.h +@@ -127,6 +127,7 @@ enum fe_sec_mini_cmd { + * to reset DiSEqC, tone and parameters + */ + enum fe_status { ++ FE_NONE = 0x00, + FE_HAS_SIGNAL = 0x01, + FE_HAS_CARRIER = 0x02, + FE_HAS_VITERBI = 0x04, +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h +index a0a365cbf3c9..0c02441d2cc9 100644 +--- a/include/uapi/linux/kvm.h ++++ b/include/uapi/linux/kvm.h +@@ -159,9 +159,11 @@ struct kvm_hyperv_exit { + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c +index 009cc9a17d95..f1042d639eee 100644 +--- a/kernel/cpu_pm.c ++++ b/kernel/cpu_pm.c +@@ -97,7 +97,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); + */ + int cpu_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + read_lock(&cpu_pm_notifier_lock); +@@ -156,7 +156,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); + */ + int cpu_cluster_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + read_lock(&cpu_pm_notifier_lock); +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index 79517e5549f1..9c939c6bf21c 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -443,6 +443,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) + + if (exception_level > 1) { + dump_stack(); ++ kgdb_io_module_registered = false; + panic("Recursive entry to debugger"); + } + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 1f27b73bd7d4..b562467d2d49 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -90,11 +90,11 @@ static void remote_function(void *data) + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might +- * be on the current CPU, which just calls the function directly ++ * be on the current CPU, which just calls the function directly. This will ++ * retry due to any failures in smp_call_function_single(), such as if the ++ * task_cpu() goes offline concurrently. 
+ * +- * returns: @func return value, or +- * -ESRCH - when the process isn't running +- * -EAGAIN - when the process moved away ++ * returns @func return value or -ESRCH when the process isn't running + */ + static int + task_function_call(struct task_struct *p, remote_function_f func, void *info) +@@ -107,11 +107,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) + }; + int ret; + +- do { +- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); +- if (!ret) +- ret = data.ret; +- } while (ret == -EAGAIN); ++ for (;;) { ++ ret = smp_call_function_single(task_cpu(p), remote_function, ++ &data, 1); ++ ret = !ret ? data.ret : -EAGAIN; ++ ++ if (ret != -EAGAIN) ++ break; ++ ++ cond_resched(); ++ } + + return ret; + } +diff --git a/kernel/exit.c b/kernel/exit.c +index d9394fcd0e2c..27f4168eaeb1 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -739,8 +739,12 @@ void __noreturn do_exit(long code) + int group_dead; + TASKS_RCU(int tasks_rcu_i); + +- profile_task_exit(tsk); +- kcov_task_exit(tsk); ++ /* ++ * We can get here from a kernel oops, sometimes with preemption off. ++ * Start by checking for critical errors. ++ * Then fix up important state like USER_DS and preemption. ++ * Then do everything else. ++ */ + + WARN_ON(blk_needs_flush_plug(tsk)); + +@@ -758,6 +762,16 @@ void __noreturn do_exit(long code) + */ + set_fs(USER_DS); + ++ if (unlikely(in_atomic())) { ++ pr_info("note: %s[%d] exited with preempt_count %d\n", ++ current->comm, task_pid_nr(current), ++ preempt_count()); ++ preempt_count_set(PREEMPT_ENABLED); ++ } ++ ++ profile_task_exit(tsk); ++ kcov_task_exit(tsk); ++ + ptrace_event(PTRACE_EVENT_EXIT, code); + + validate_creds_for_do_exit(tsk); +@@ -794,13 +808,6 @@ void __noreturn do_exit(long code) + */ + raw_spin_unlock_wait(&tsk->pi_lock); + +- if (unlikely(in_atomic())) { +- pr_info("note: %s[%d] exited with preempt_count %d\n", +- current->comm, task_pid_nr(current), +- preempt_count()); +- preempt_count_set(PREEMPT_ENABLED); +- } +- + /* sync mm's RSS info before statistics gathering */ + if (tsk->mm) + sync_mm_rss(tsk->mm); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 5e65c7eea872..8233032a2f01 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -2542,7 +2542,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) + /* + * We don't care about NUMA placement if we don't have memory. + */ +- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) ++ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) + return; + + /* +diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h +index 8f383cca6bb1..623440d3d365 100644 +--- a/lib/mpi/longlong.h ++++ b/lib/mpi/longlong.h +@@ -671,7 +671,7 @@ do { \ + ************** MIPS/64 ************** + ***************************************/ + #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 +-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 ++#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) + /* + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C + * code below, so we special case MIPS64r6 until the compiler can do better. 
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 5fbd77d52602..cf4c0a61b370 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1755,6 +1755,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + spinlock_t *ptl; + struct mm_struct *mm = vma->vm_mm; + unsigned long haddr = address & HPAGE_PMD_MASK; ++ bool was_locked = false; ++ pmd_t _pmd; + + mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); + ptl = pmd_lock(mm, pmd); +@@ -1764,11 +1766,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + * pmd against. Otherwise we can end up replacing wrong page. + */ + VM_BUG_ON(freeze && !page); +- if (page && page != pmd_page(*pmd)) +- goto out; ++ if (page) { ++ VM_WARN_ON_ONCE(!PageLocked(page)); ++ was_locked = true; ++ if (page != pmd_page(*pmd)) ++ goto out; ++ } + ++repeat: + if (pmd_trans_huge(*pmd)) { +- page = pmd_page(*pmd); ++ if (!page) { ++ page = pmd_page(*pmd); ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } ++ put_page(page); ++ } ++ } + if (PageMlocked(page)) + clear_page_mlock(page); + } else if (!pmd_devmap(*pmd)) +@@ -1776,6 +1799,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + __split_huge_pmd_locked(vma, pmd, haddr, freeze); + out: + spin_unlock(ptl); ++ if (!was_locked && page) ++ unlock_page(page); + mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); + } + +diff --git a/mm/slub.c b/mm/slub.c +index 9b44423f1cf0..3d45713187a4 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5620,8 +5620,10 @@ static int sysfs_slab_add(struct kmem_cache *s) + + s->kobj.kset = cache_kset(s); + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); +- if (err) ++ if (err) { ++ kobject_put(&s->kobj); + goto out; ++ } + + err = sysfs_create_group(&s->kobj, &slab_attr_group); + if (err) +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 6f78489fdb13..a8aa3f29f2d6 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -3775,6 +3775,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ ++ case 0x1e: /* Invalid LMP Parameters */ + case 0x1f: /* Unspecified error */ + case 0x20: /* Unsupported LMP Parameter value */ + if (conn->out) { +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 455fa4a30353..2c770bba212c 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -184,14 +184,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + retv = -EBUSY; + break; + } +- } +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_prot != &tcpv6_prot) { +- retv = -EBUSY; ++ } else if (sk->sk_protocol == IPPROTO_TCP) { ++ if (sk->sk_prot != &tcpv6_prot) { ++ retv = -EBUSY; ++ break; ++ } ++ } else { + break; + } +- if (sk->sk_protocol != IPPROTO_TCP) +- break; ++ + if (sk->sk_state != TCP_ESTABLISHED) { + retv = -ENOTCONN; + break; +diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c +index 4c48e9bb21e2..d2510e432c18 100644 +--- a/net/netfilter/nft_nat.c ++++ b/net/netfilter/nft_nat.c +@@ -135,7 +135,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + priv->type = NF_NAT_MANIP_DST; + break; 
+ default: +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + err = nft_nat_validate(ctx, expr, NULL); +@@ -206,7 +206,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + return 0; +diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c +index 5fec3abbe19b..c7d88f979c56 100644 +--- a/net/sunrpc/auth_gss/gss_mech_switch.c ++++ b/net/sunrpc/auth_gss/gss_mech_switch.c +@@ -61,6 +61,8 @@ gss_mech_free(struct gss_api_mech *gm) + + for (i = 0; i < gm->gm_pf_num; i++) { + pf = &gm->gm_pfs[i]; ++ if (pf->domain) ++ auth_domain_put(pf->domain); + kfree(pf->auth_domain_name); + pf->auth_domain_name = NULL; + } +@@ -83,6 +85,7 @@ make_auth_domain_name(char *name) + static int + gss_mech_svc_setup(struct gss_api_mech *gm) + { ++ struct auth_domain *dom; + struct pf_desc *pf; + int i, status; + +@@ -92,10 +95,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) + status = -ENOMEM; + if (pf->auth_domain_name == NULL) + goto out; +- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, +- pf->auth_domain_name); +- if (status) ++ dom = svcauth_gss_register_pseudoflavor( ++ pf->pseudoflavor, pf->auth_domain_name); ++ if (IS_ERR(dom)) { ++ status = PTR_ERR(dom); + goto out; ++ } ++ pf->domain = dom; + } + return 0; + out: +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index d7775ca2fbb9..fd897d900d12 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -779,7 +779,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) + + EXPORT_SYMBOL_GPL(svcauth_gss_flavor); + +-int ++struct auth_domain * + svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + { + struct gss_domain *new; +@@ -796,21 +796,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + new->h.flavour = &svcauthops_gss; + new->pseudoflavor = pseudoflavor; + +- stat = 0; + test = auth_domain_lookup(name, &new->h); +- if (test != &new->h) { /* Duplicate registration */ ++ if (test != &new->h) { ++ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", ++ name); ++ stat = -EADDRINUSE; + auth_domain_put(test); +- kfree(new->h.name); +- goto out_free_dom; ++ goto out_free_name; + } +- return 0; ++ return test; + ++out_free_name: ++ kfree(new->h.name); + out_free_dom: + kfree(new); + out: +- return stat; ++ return ERR_PTR(stat); + } +- + EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); + + static inline int +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index e034dc21421e..b0440cf34970 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -240,7 +240,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + + /* Portable EVM signatures must include an IMA hash */ + if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) +- return -EPERM; ++ error = -EPERM; + out: + kfree(xattr_value); + kfree(desc); +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index df7834aa1b8f..5f2a0a07ceac 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; + #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE + #define IMA_EVENT_NAME_LEN_MAX 255 + +-#define IMA_HASH_BITS 9 ++#define IMA_HASH_BITS 10 + #define 
IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) + + #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +@@ -136,9 +136,10 @@ struct ima_h_table { + }; + extern struct ima_h_table ima_htable; + +-static inline unsigned long ima_hash_key(u8 *digest) ++static inline unsigned int ima_hash_key(u8 *digest) + { +- return hash_long(*digest, IMA_HASH_BITS); ++ /* there is no point in taking a hash of part of a digest */ ++ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; + } + + enum ima_hooks { +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c +index aed47b777a57..4926a5a1bc94 100644 +--- a/security/integrity/ima/ima_policy.c ++++ b/security/integrity/ima/ima_policy.c +@@ -150,7 +150,7 @@ static struct ima_rule_entry default_appraise_rules[] = { + static LIST_HEAD(ima_default_rules); + static LIST_HEAD(ima_policy_rules); + static LIST_HEAD(ima_temp_rules); +-static struct list_head *ima_rules; ++static struct list_head *ima_rules = &ima_default_rules; + + static int ima_policy __initdata; + +@@ -429,7 +429,6 @@ void __init ima_init_policy(void) + temp_ima_appraise |= IMA_APPRAISE_POLICY; + } + +- ima_rules = &ima_default_rules; + ima_update_policy_flag(); + } + +diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c +index 6492fe96cae4..3397b216bc6c 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -901,11 +901,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + else + rule += strlen(skp->smk_known) + 1; + ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &maplevel); + if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + goto out; + + rule += SMK_DIGITLEN; ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &catlen); + if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) + goto out; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 23e17a58651b..5c07c5be3142 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1836,6 +1836,11 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) + } + pcm_file = f.file->private_data; + substream1 = pcm_file->substream; ++ if (substream == substream1) { ++ res = -EINVAL; ++ goto _badf; ++ } ++ + group = kmalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + res = -ENOMEM; +diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c +index 1901c2bb6c3b..a36e2121ef09 100644 +--- a/sound/isa/es1688/es1688.c ++++ b/sound/isa/es1688/es1688.c +@@ -284,8 +284,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, + return error; + } + error = snd_es1688_probe(card, dev); +- if (error < 0) ++ if (error < 0) { ++ snd_card_free(card); + return error; ++ } + pnp_set_card_drvdata(pcard, card); + snd_es968_pnp_is_probed = 1; + return 0; +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 549b9b061694..023a36a4922b 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -768,9 +768,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + if (chip == (void *)-1L) + return 0; + +- chip->autosuspended = !!PMSG_IS_AUTO(message); +- if (!chip->autosuspended) +- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + if (!chip->num_suspended_intf++) { + list_for_each_entry(as, &chip->pcm_list, list) { + snd_pcm_suspend_all(as->pcm); +@@ -783,6 +780,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + snd_usb_mixer_suspend(mixer); + } + ++ if 
(!PMSG_IS_AUTO(message) && !chip->system_suspend) { ++ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); ++ chip->system_suspend = chip->num_suspended_intf; ++ } ++ + return 0; + } + +@@ -795,10 +797,11 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + + if (chip == (void *)-1L) + return 0; +- if (--chip->num_suspended_intf) +- return 0; + + atomic_inc(&chip->active); /* avoid autopm */ ++ if (chip->num_suspended_intf > 1) ++ goto out; ++ + /* + * ALSA leaves material resumption to user space + * we just notify and restart the mixers +@@ -813,9 +816,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + snd_usbmidi_resume(p); + } + +- if (!chip->autosuspended) ++ out: ++ if (chip->num_suspended_intf == chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); +- chip->autosuspended = 0; ++ chip->system_suspend = 0; ++ } ++ chip->num_suspended_intf--; + + err_out: + atomic_dec(&chip->active); /* allow autopm after this point */ +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index 4d5c89a7ba2b..f4ee83c8e0b2 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -37,7 +37,7 @@ struct snd_usb_audio { + struct usb_interface *pm_intf; + u32 usb_id; + struct mutex mutex; +- unsigned int autosuspended:1; ++ unsigned int system_suspend; + atomic_t active; + atomic_t shutdown; + atomic_t usage_count; +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index b0b8ba9b800c..c7399d7f4bc7 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -778,6 +778,12 @@ static int add_special_section_alts(struct objtool_file *file) + } + + if (special_alt->group) { ++ if (!special_alt->orig_len) { ++ WARN_FUNC("empty alternative entry", ++ orig_insn->sec, orig_insn->offset); ++ continue; ++ } ++ + ret = handle_group_alt(file, special_alt, orig_insn, + &new_insn); + if (ret) +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c +index 2b420e7a92c0..929f0d0ea9da 100644 +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -344,6 +344,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) + + for (k = 0; k < pev->ntevs; k++) { + struct probe_trace_event *tev = &pev->tevs[k]; ++ /* Skipped events have no event name */ ++ if (!tev->event) ++ continue; + + /* We use tev's name for showing new events */ + show_perf_probe_event(tev->group, tev->event, pev, +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c +index 8bec05365aae..9be7c95bd1e1 100644 +--- a/tools/perf/util/dso.c ++++ b/tools/perf/util/dso.c +@@ -19,6 +19,7 @@ char dso__symtab_origin(const struct dso *dso) + [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', ++ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', + [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', +@@ -77,6 +78,21 @@ int dso__read_binary_type_filename(const struct dso *dso, + snprintf(filename + len, size - len, "%s", dso->long_name); + break; + ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: ++ /* ++ * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in ++ * /usr/lib/debug/lib when it is expected to be in ++ * /usr/lib/debug/usr/lib ++ */ ++ if (strlen(dso->long_name) < 9 || ++ strncmp(dso->long_name, "/usr/lib/", 9)) { ++ ret = -1; ++ break; ++ } ++ len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); ++ 
snprintf(filename + len, size - len, "%s", dso->long_name + 4); ++ break; ++ + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + { + const char *last_slash; +diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h +index ecc4bbd3f82e..b886720ffea0 100644 +--- a/tools/perf/util/dso.h ++++ b/tools/perf/util/dso.h +@@ -22,6 +22,7 @@ enum dso_binary_type { + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c +index 82e4f158c88e..7d0d44b4f3d5 100644 +--- a/tools/perf/util/probe-finder.c ++++ b/tools/perf/util/probe-finder.c +@@ -111,6 +111,7 @@ enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index acde8e489352..4e27b868f774 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -58,6 +58,7 @@ static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +@@ -1361,6 +1362,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; diff --git a/patch/kernel/odroidxu4-current/patch-5.4.41-42.patch b/patch/kernel/odroidxu4-current/patch-5.4.41-42.patch new file mode 100644 index 000000000..aa9beba86 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.41-42.patch @@ -0,0 +1,4927 @@ +diff --git a/Makefile b/Makefile +index a8c772b299aa..1bd1b17cd207 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 41 ++SUBLEVEL = 42 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -707,12 +707,9 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE + KBUILD_CFLAGS += -Os + endif + +-ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED +-KBUILD_CFLAGS += -Wno-maybe-uninitialized +-endif +- + # Tell gcc to never replace conditional load with a non-conditional one + KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) ++KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) + + include scripts/Makefile.kcov + include scripts/Makefile.gcc-plugins +@@ -860,6 +857,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign + # disable stringop warnings in gcc 8+ + KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) + ++# We'll want to enable this eventually, but it's not going away for 5.7 at least ++KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) ++KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) ++KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) ++ ++# Another good warning that we'll want to enable eventually ++KBUILD_CFLAGS += $(call cc-disable-warning, restrict) ++ ++# Enabled with W=2, 
disabled by default as noisy ++KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) ++ + # disable invalid "can't wrap" optimizations for signed / pointers + KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) + +diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi +index c6be65249f42..a6ef3d137c7a 100644 +--- a/arch/arm/boot/dts/dra7.dtsi ++++ b/arch/arm/boot/dts/dra7.dtsi +@@ -172,6 +172,7 @@ + #address-cells = <1>; + ranges = <0x51000000 0x51000000 0x3000 + 0x0 0x20000000 0x10000000>; ++ dma-ranges; + /** + * To enable PCI endpoint mode, disable the pcie1_rc + * node and enable pcie1_ep mode. +@@ -185,7 +186,6 @@ + device_type = "pci"; + ranges = <0x81000000 0 0 0x03000 0 0x00010000 + 0x82000000 0 0x20013000 0x13000 0 0xffed000>; +- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; + bus-range = <0x00 0xff>; + #interrupt-cells = <1>; + num-lanes = <1>; +@@ -230,6 +230,7 @@ + #address-cells = <1>; + ranges = <0x51800000 0x51800000 0x3000 + 0x0 0x30000000 0x10000000>; ++ dma-ranges; + status = "disabled"; + pcie2_rc: pcie@51800000 { + reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; +@@ -240,7 +241,6 @@ + device_type = "pci"; + ranges = <0x81000000 0 0 0x03000 0 0x00010000 + 0x82000000 0 0x30013000 0x13000 0 0xffed000>; +- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; + bus-range = <0x00 0xff>; + #interrupt-cells = <1>; + num-lanes = <1>; +diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts +index 0cd75dadf292..188639738dc3 100644 +--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts ++++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts +@@ -75,8 +75,8 @@ + imx27-phycard-s-rdk { + pinctrl_i2c1: i2c1grp { + fsl,pins = < +- MX27_PAD_I2C2_SDA__I2C2_SDA 0x0 +- MX27_PAD_I2C2_SCL__I2C2_SCL 0x0 ++ MX27_PAD_I2C_DATA__I2C_DATA 0x0 ++ MX27_PAD_I2C_CLK__I2C_CLK 0x0 + >; + }; + +diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts +index 0d594e4bd559..a1173bf5bff5 100644 +--- a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts ++++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts +@@ -38,7 +38,7 @@ + }; + + &switch_ports { +- /delete-node/ port@2; ++ /delete-node/ port@3; + }; + + &touchscreen { +diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi +index dd865f3c2eda..4447f45f0cba 100644 +--- a/arch/arm/boot/dts/r8a73a4.dtsi ++++ b/arch/arm/boot/dts/r8a73a4.dtsi +@@ -131,7 +131,14 @@ + cmt1: timer@e6130000 { + compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1"; + reg = <0 0xe6130000 0 0x1004>; +- interrupts = ; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; + clocks = <&mstp3_clks R8A73A4_CLK_CMT1>; + clock-names = "fck"; + power-domains = <&pd_c5>; +diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi +index 12ffe73bf2bc..155f58e6d4e8 100644 +--- a/arch/arm/boot/dts/r8a7740.dtsi ++++ b/arch/arm/boot/dts/r8a7740.dtsi +@@ -479,7 +479,7 @@ + cpg_clocks: cpg_clocks@e6150000 { + compatible = "renesas,r8a7740-cpg-clocks"; + reg = <0xe6150000 0x10000>; +- clocks = <&extal1_clk>, <&extalr_clk>; ++ clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>; + #clock-cells = <1>; + clock-output-names = "system", "pllc0", "pllc1", + "pllc2", "r", +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +index 0ee8a369c547..2199a54c720c 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi ++++ 
b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +@@ -2365,7 +2365,7 @@ + reg = <0x0 0xff400000 0x0 0x40000>; + interrupts = ; + clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; +- clock-names = "ddr"; ++ clock-names = "otg"; + phys = <&usb2_phy1>; + phy-names = "usb2-phy"; + dr_mode = "peripheral"; +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi +index 554863429aa6..e2094575f528 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi +@@ -152,6 +152,10 @@ + clock-latency = <50000>; + }; + ++&frddr_a { ++ status = "okay"; ++}; ++ + &frddr_b { + status = "okay"; + }; +diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi +index 43c4db312146..ac3a3b333efa 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi +@@ -616,7 +616,7 @@ + reg = <0x30bd0000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>, +- <&clk IMX8MN_CLK_SDMA1_ROOT>; ++ <&clk IMX8MN_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; +diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi +index 461a47ea656d..e81cd83b138b 100644 +--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi ++++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi +@@ -1318,6 +1318,7 @@ + ipmmu_vip0: mmu@e7b00000 { + compatible = "renesas,ipmmu-r8a77980"; + reg = <0 0xe7b00000 0 0x1000>; ++ renesas,ipmmu-main = <&ipmmu_mm 4>; + power-domains = <&sysc R8A77980_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; +@@ -1325,6 +1326,7 @@ + ipmmu_vip1: mmu@e7960000 { + compatible = "renesas,ipmmu-r8a77980"; + reg = <0 0xe7960000 0 0x1000>; ++ renesas,ipmmu-main = <&ipmmu_mm 11>; + power-domains = <&sysc R8A77980_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +index 49c4b96da3d4..6abc6f4a86cf 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +@@ -92,7 +92,7 @@ + &i2c1 { + status = "okay"; + +- rk805: rk805@18 { ++ rk805: pmic@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio2>; +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +index 62936b432f9a..304fad1a0b57 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +@@ -169,7 +169,7 @@ + &i2c1 { + status = "okay"; + +- rk805: rk805@18 { ++ rk805: pmic@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio2>; +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +index cede1ad81be2..cd97016b7c18 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +@@ -410,7 +410,7 @@ + reset-names = "usb3-otg"; + status = "disabled"; + +- usbdrd_dwc3_0: dwc3 { ++ usbdrd_dwc3_0: usb@fe800000 { + compatible = "snps,dwc3"; + reg = <0x0 0xfe800000 0x0 0x100000>; + interrupts = ; +@@ -446,7 +446,7 @@ + reset-names = "usb3-otg"; + status = "disabled"; + +- usbdrd_dwc3_1: dwc3 { ++ usbdrd_dwc3_1: usb@fe900000 { + compatible = "snps,dwc3"; + reg = <0x0 0xfe900000 0x0 0x100000>; + interrupts = ; +diff --git a/arch/arm64/kernel/machine_kexec.c 
b/arch/arm64/kernel/machine_kexec.c +index 0df8493624e0..cc049ff5c6a5 100644 +--- a/arch/arm64/kernel/machine_kexec.c ++++ b/arch/arm64/kernel/machine_kexec.c +@@ -189,6 +189,7 @@ void machine_kexec(struct kimage *kimage) + * the offline CPUs. Therefore, we must use the __* variant here. + */ + __flush_icache_range((uintptr_t)reboot_code_buffer, ++ (uintptr_t)reboot_code_buffer + + arm64_relocate_new_kernel_size); + + /* Flush the kimage list and its buffers. */ +diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h +index 91c8f1d9bcee..1a2c80e8be84 100644 +--- a/arch/powerpc/include/asm/book3s/32/kup.h ++++ b/arch/powerpc/include/asm/book3s/32/kup.h +@@ -75,7 +75,7 @@ + + .macro kuap_check current, gpr + #ifdef CONFIG_PPC_KUAP_DEBUG +- lwz \gpr2, KUAP(thread) ++ lwz \gpr, KUAP(thread) + 999: twnei \gpr, 0 + EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) + #endif +diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile +index 33b16f4212f7..a4ee3a0e7d20 100644 +--- a/arch/riscv/kernel/vdso/Makefile ++++ b/arch/riscv/kernel/vdso/Makefile +@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) + + # We also create a special relocatable object that should mirror the symbol +-# table and layout of the linked DSO. With ld -R we can then refer to +-# these symbols in the kernel code rather than hand-coded addresses. ++# table and layout of the linked DSO. With ld --just-symbols we can then ++# refer to these symbols in the kernel code rather than hand-coded addresses. + + SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + -Wl,--build-id -Wl,--hash-style=both + $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE + $(call if_changed,vdsold) + +-LDFLAGS_vdso-syms.o := -r -R ++LDFLAGS_vdso-syms.o := -r --just-symbols + $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE + $(call if_changed,ld) + +diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h +index 91e29b6a86a5..9804a7957f4e 100644 +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -55,8 +55,13 @@ + /* + * Initialize the stackprotector canary value. + * +- * NOTE: this must only be called from functions that never return, ++ * NOTE: this must only be called from functions that never return + * and it must always be inlined. ++ * ++ * In addition, it should be called from a compilation unit for which ++ * stack protector is disabled. Alternatively, the caller should not end ++ * with a function call which gets tail-call optimized as that would ++ * lead to checking a modified canary value. + */ + static __always_inline void boot_init_stack_canary(void) + { +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 69881b2d446c..9674321ce3a3 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -262,6 +262,14 @@ static void notrace start_secondary(void *unused) + + wmb(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); ++ ++ /* ++ * Prevent tail call to cpu_startup_entry() because the stack protector ++ * guard has been changed a couple of function calls up, in ++ * boot_init_stack_canary() and must not be checked before tail calling ++ * another function. 
++ */ ++ prevent_tail_call_optimization(); + } + + /** +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index fb37221a1532..647e6af0883d 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -608,23 +608,23 @@ EXPORT_SYMBOL_GPL(unwind_next_frame); + void __unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs, unsigned long *first_frame) + { +- if (!orc_init) +- goto done; +- + memset(state, 0, sizeof(*state)); + state->task = task; + ++ if (!orc_init) ++ goto err; ++ + /* + * Refuse to unwind the stack of a task while it's executing on another + * CPU. This check is racy, but that's ok: the unwinder has other + * checks to prevent it from going off the rails. + */ + if (task_on_another_cpu(task)) +- goto done; ++ goto err; + + if (regs) { + if (user_mode(regs)) +- goto done; ++ goto the_end; + + state->ip = regs->ip; + state->sp = regs->sp; +@@ -657,6 +657,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + * generate some kind of backtrace if this happens. + */ + void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); ++ state->error = true; + if (get_stack_info(next_page, state->task, &state->stack_info, + &state->stack_mask)) + return; +@@ -682,8 +683,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + + return; + +-done: ++err: ++ state->error = true; ++the_end: + state->stack_info.type = STACK_TYPE_UNKNOWN; +- return; + } + EXPORT_SYMBOL_GPL(__unwind_start); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 56a0f9c18892..41408065574f 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -3682,7 +3682,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, + unsigned bank_num = mcg_cap & 0xff, bank; + + r = -EINVAL; +- if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) ++ if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) + goto out; + if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000)) + goto out; +diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c +index 802ee5bba66c..0cebe5db691d 100644 +--- a/arch/x86/xen/smp_pv.c ++++ b/arch/x86/xen/smp_pv.c +@@ -92,6 +92,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void) + cpu_bringup(); + boot_init_stack_canary(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); ++ prevent_tail_call_optimization(); + } + + void xen_smp_intr_free_pv(unsigned int cpu) +diff --git a/crypto/lrw.c b/crypto/lrw.c +index be829f6afc8e..3d40e1f32bea 100644 +--- a/crypto/lrw.c ++++ b/crypto/lrw.c +@@ -289,7 +289,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) + crypto_free_skcipher(ctx->child); + } + +-static void free(struct skcipher_instance *inst) ++static void free_inst(struct skcipher_instance *inst) + { + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +@@ -401,7 +401,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) + inst->alg.encrypt = encrypt; + inst->alg.decrypt = decrypt; + +- inst->free = free; ++ inst->free = free_inst; + + err = skcipher_register_instance(tmpl, inst); + if (err) +diff --git a/crypto/xts.c b/crypto/xts.c +index ab117633d64e..9d72429f666e 100644 +--- a/crypto/xts.c ++++ b/crypto/xts.c +@@ -328,7 +328,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) + crypto_free_cipher(ctx->tweak); + } + +-static void free(struct skcipher_instance *inst) ++static void free_inst(struct skcipher_instance *inst) + { + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +@@ -439,7 +439,7 @@ static int 
create(struct crypto_template *tmpl, struct rtattr **tb) + inst->alg.encrypt = encrypt; + inst->alg.decrypt = decrypt; + +- inst->free = free; ++ inst->free = free_inst; + + err = skcipher_register_instance(tmpl, inst); + if (err) +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 5e6c8bfc6612..5b53a66d403d 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1962,23 +1962,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action) + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); + } + +-bool acpi_ec_other_gpes_active(void) +-{ +- return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX); +-} +- + bool acpi_ec_dispatch_gpe(void) + { + u32 ret; + + if (!first_ec) ++ return acpi_any_gpe_status_set(U32_MAX); ++ ++ /* ++ * Report wakeup if the status bit is set for any enabled GPE other ++ * than the EC one. ++ */ ++ if (acpi_any_gpe_status_set(first_ec->gpe)) ++ return true; ++ ++ if (ec_no_wakeup) + return false; + ++ /* ++ * Dispatch the EC GPE in-band, but do not report wakeup in any case ++ * to allow the caller to process events properly after that. ++ */ + ret = acpi_dispatch_gpe(NULL, first_ec->gpe); +- if (ret == ACPI_INTERRUPT_HANDLED) { ++ if (ret == ACPI_INTERRUPT_HANDLED) + pm_pr_dbg("EC GPE dispatched\n"); +- return true; +- } ++ + return false; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index cbf7f34c3ce7..afe6636f9ad3 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -201,7 +201,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); + + #ifdef CONFIG_PM_SLEEP + void acpi_ec_flush_work(void); +-bool acpi_ec_other_gpes_active(void); + bool acpi_ec_dispatch_gpe(void); + #endif + +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index edad89e58c58..85514c0f3aa5 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -1010,20 +1010,10 @@ static bool acpi_s2idle_wake(void) + if (acpi_check_wakeup_handlers()) + return true; + +- /* +- * If the status bit is set for any enabled GPE other than the +- * EC one, the wakeup is regarded as a genuine one. +- */ +- if (acpi_ec_other_gpes_active()) ++ /* Check non-EC GPE wakeups and dispatch the EC GPE. */ ++ if (acpi_ec_dispatch_gpe()) + return true; + +- /* +- * If the EC GPE status bit has not been set, the wakeup is +- * regarded as a spurious one. +- */ +- if (!acpi_ec_dispatch_gpe()) +- return false; +- + /* + * Cancel the wakeup and process all pending events in case + * there are any wakeup ones in there. +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c +index a55383b139df..0cf2fe290230 100644 +--- a/drivers/block/virtio_blk.c ++++ b/drivers/block/virtio_blk.c +@@ -33,6 +33,15 @@ struct virtio_blk_vq { + } ____cacheline_aligned_in_smp; + + struct virtio_blk { ++ /* ++ * This mutex must be held by anything that may run after ++ * virtblk_remove() sets vblk->vdev to NULL. ++ * ++ * blk-mq, virtqueue processing, and sysfs attribute code paths are ++ * shut down before vblk->vdev is set to NULL and therefore do not need ++ * to hold this mutex. ++ */ ++ struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ +@@ -44,6 +53,13 @@ struct virtio_blk { + /* Process context for config space updates */ + struct work_struct config_work; + ++ /* ++ * Tracks references from block_device_operations open/release and ++ * virtio_driver probe/remove so this object can be freed once no ++ * longer in use. 
++ */ ++ refcount_t refs; ++ + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + +@@ -388,10 +404,55 @@ out: + return err; + } + ++static void virtblk_get(struct virtio_blk *vblk) ++{ ++ refcount_inc(&vblk->refs); ++} ++ ++static void virtblk_put(struct virtio_blk *vblk) ++{ ++ if (refcount_dec_and_test(&vblk->refs)) { ++ ida_simple_remove(&vd_index_ida, vblk->index); ++ mutex_destroy(&vblk->vdev_mutex); ++ kfree(vblk); ++ } ++} ++ ++static int virtblk_open(struct block_device *bd, fmode_t mode) ++{ ++ struct virtio_blk *vblk = bd->bd_disk->private_data; ++ int ret = 0; ++ ++ mutex_lock(&vblk->vdev_mutex); ++ ++ if (vblk->vdev) ++ virtblk_get(vblk); ++ else ++ ret = -ENXIO; ++ ++ mutex_unlock(&vblk->vdev_mutex); ++ return ret; ++} ++ ++static void virtblk_release(struct gendisk *disk, fmode_t mode) ++{ ++ struct virtio_blk *vblk = disk->private_data; ++ ++ virtblk_put(vblk); ++} ++ + /* We provide getgeo only to please some old bootloader/partitioning tools */ + static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) + { + struct virtio_blk *vblk = bd->bd_disk->private_data; ++ int ret = 0; ++ ++ mutex_lock(&vblk->vdev_mutex); ++ ++ if (!vblk->vdev) { ++ ret = -ENXIO; ++ goto out; ++ } + + /* see if the host passed in geometry config */ + if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { +@@ -407,12 +468,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) + geo->sectors = 1 << 5; + geo->cylinders = get_capacity(bd->bd_disk) >> 11; + } +- return 0; ++out: ++ mutex_unlock(&vblk->vdev_mutex); ++ return ret; + } + + static const struct block_device_operations virtblk_fops = { + .ioctl = virtblk_ioctl, + .owner = THIS_MODULE, ++ .open = virtblk_open, ++ .release = virtblk_release, + .getgeo = virtblk_getgeo, + }; + +@@ -767,6 +832,10 @@ static int virtblk_probe(struct virtio_device *vdev) + goto out_free_index; + } + ++ /* This reference is dropped in virtblk_remove(). */ ++ refcount_set(&vblk->refs, 1); ++ mutex_init(&vblk->vdev_mutex); ++ + vblk->vdev = vdev; + vblk->sg_elems = sg_elems; + +@@ -932,8 +1001,6 @@ out: + static void virtblk_remove(struct virtio_device *vdev) + { + struct virtio_blk *vblk = vdev->priv; +- int index = vblk->index; +- int refc; + + /* Make sure no work handler is accessing the device. */ + flush_work(&vblk->config_work); +@@ -943,18 +1010,21 @@ static void virtblk_remove(struct virtio_device *vdev) + + blk_mq_free_tag_set(&vblk->tag_set); + ++ mutex_lock(&vblk->vdev_mutex); ++ + /* Stop all the virtqueues. */ + vdev->config->reset(vdev); + +- refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); ++ /* Virtqueues are stopped, nothing can use vblk->vdev anymore. 
*/ ++ vblk->vdev = NULL; ++ + put_disk(vblk->disk); + vdev->config->del_vqs(vdev); + kfree(vblk->vqs); +- kfree(vblk); + +- /* Only free device id if we don't have any users */ +- if (refc == 1) +- ida_simple_remove(&vd_index_ida, index); ++ mutex_unlock(&vblk->vdev_mutex); ++ ++ virtblk_put(vblk); + } + + #ifdef CONFIG_PM_SLEEP +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 80b029713722..9728d1282e43 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -3448,6 +3448,9 @@ static int __clk_core_init(struct clk_core *core) + out: + clk_pm_runtime_put(core); + unlock: ++ if (ret) ++ hlist_del_init(&core->child_node); ++ + clk_prepare_unlock(); + + if (!ret) +diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c +index d17cfb7a3ff4..d7243c09cc84 100644 +--- a/drivers/clk/rockchip/clk-rk3228.c ++++ b/drivers/clk/rockchip/clk-rk3228.c +@@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" }; + PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; + PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" }; + +-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" }; +- + PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; + PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; + PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; +@@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { + RK2928_CLKSEL_CON(24), 6, 10, DFLAGS, + RK2928_CLKGATE_CON(2), 8, GFLAGS), + +- GATE(0, "cpll_gpu", "cpll", 0, +- RK2928_CLKGATE_CON(3), 13, GFLAGS), +- GATE(0, "gpll_gpu", "gpll", 0, +- RK2928_CLKGATE_CON(3), 13, GFLAGS), +- GATE(0, "hdmiphy_gpu", "hdmiphy", 0, +- RK2928_CLKGATE_CON(3), 13, GFLAGS), +- GATE(0, "usb480m_gpu", "usb480m", 0, ++ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0, ++ RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK2928_CLKGATE_CON(3), 13, GFLAGS), +- COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0, +- RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS), + + COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0, + RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS, +@@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { + GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS), + + /* PD_GPU */ +- GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS), +- GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS), ++ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS), ++ GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), + + /* PD_BUS */ + GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS), +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 45499e0b9f2f..d3d7c4ef7d04 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -1058,7 +1058,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, + + update_turbo_state(); + if (global.turbo_disabled) { +- pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); ++ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); + mutex_unlock(&intel_pstate_limits_lock); + mutex_unlock(&intel_pstate_driver_lock); + return -EPERM; +diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c +index e7d1e12bf464..89d90c456c0c 100644 +--- 
a/drivers/dma/mmp_tdma.c ++++ b/drivers/dma/mmp_tdma.c +@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) + gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, + size); + tdmac->desc_arr = NULL; ++ if (tdmac->status == DMA_ERROR) ++ tdmac->status = DMA_COMPLETE; + + return; + } +@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( + if (!desc) + goto err_out; + +- mmp_tdma_config_write(chan, direction, &tdmac->slave_config); ++ if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config)) ++ goto err_out; + + while (buf < buf_len) { + desc = &tdmac->desc_arr[i]; +diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c +index 581e7a290d98..a3b0b4c56a19 100644 +--- a/drivers/dma/pch_dma.c ++++ b/drivers/dma/pch_dma.c +@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev, + } + + pci_set_master(pdev); ++ pd->dma.dev = &pdev->dev; + + err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); + if (err) { +@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev, + goto err_free_irq; + } + +- pd->dma.dev = &pdev->dev; + + INIT_LIST_HEAD(&pd->dma.channels); + +diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c +index 31f9f0e369b9..55b031d2c989 100644 +--- a/drivers/firmware/efi/tpm.c ++++ b/drivers/firmware/efi/tpm.c +@@ -16,7 +16,7 @@ + int efi_tpm_final_log_size; + EXPORT_SYMBOL(efi_tpm_final_log_size); + +-static int tpm2_calc_event_log_size(void *data, int count, void *size_info) ++static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info) + { + struct tcg_pcr_event2_head *header; + int event_size, size = 0; +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index de5d1383f28d..3edc1762803a 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -528,7 +528,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + { + struct pca953x_chip *chip = gpiochip_get_data(gc); + +- switch (config) { ++ switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + return pca953x_gpio_set_pull_up_down(chip, offset, config); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +index 143753d237e7..eaa5e7b7c19d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, + u32 cpp; + u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | +- AMDGPU_GEM_CREATE_VRAM_CLEARED | +- AMDGPU_GEM_CREATE_CPU_GTT_USWC; ++ AMDGPU_GEM_CREATE_VRAM_CLEARED; + + info = drm_get_format_info(adev->ddev, mode_cmd); + cpp = info->cpp[0]; +diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +index c45304f1047c..4af9acc2dc4f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c ++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, + u32 extra_bits = vmid & 0xf; + + /* IB packet must end on a 8 DW boundary */ +- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); ++ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); + amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ +@@ -811,7 +811,7 @@ static 
void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) + u32 pad_count; + int i; + +- pad_count = (8 - (ib->length_dw & 0x7)) % 8; ++ pad_count = (-ib->length_dw) & 7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = +diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +index 074a9a09c0a7..a5b60c9a2418 100644 +--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h ++++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +@@ -73,6 +73,22 @@ + #define SDMA_OP_AQL_COPY 0 + #define SDMA_OP_AQL_BARRIER_OR 0 + ++#define SDMA_GCR_RANGE_IS_PA (1 << 18) ++#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) ++#define SDMA_GCR_GL2_WB (1 << 15) ++#define SDMA_GCR_GL2_INV (1 << 14) ++#define SDMA_GCR_GL2_DISCARD (1 << 13) ++#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) ++#define SDMA_GCR_GL2_US (1 << 10) ++#define SDMA_GCR_GL1_INV (1 << 9) ++#define SDMA_GCR_GLV_INV (1 << 8) ++#define SDMA_GCR_GLK_INV (1 << 7) ++#define SDMA_GCR_GLK_WB (1 << 6) ++#define SDMA_GCR_GLM_INV (1 << 5) ++#define SDMA_GCR_GLM_WB (1 << 4) ++#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) ++#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) ++ + /*define for op field*/ + #define SDMA_PKT_HEADER_op_offset 0 + #define SDMA_PKT_HEADER_op_mask 0x000000FF +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +index a10175838013..b6af67f6f214 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + /* IB packet must end on a 8 DW boundary */ +- sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); ++ sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); +@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib + u32 pad_count; + int i; + +- pad_count = (8 - (ib->length_dw & 0x7)) % 8; ++ pad_count = (-ib->length_dw) & 7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +index 5f4e2c616241..cd3ebed46d05 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + /* IB packet must end on a 8 DW boundary */ +- sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); ++ sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); +@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib + u32 pad_count; + int i; + +- pad_count = (8 - (ib->length_dw & 0x7)) % 8; ++ pad_count = (-ib->length_dw) & 7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +index 4554e72c8378..23de332f3c6e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c ++++ 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +@@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + /* IB packet must end on a 8 DW boundary */ +- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); ++ sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); +@@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib + u32 pad_count; + int i; + +- pad_count = (8 - (ib->length_dw & 0x7)) % 8; ++ pad_count = (-ib->length_dw) & 7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +index 8493bfbbc148..bd715012185c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +@@ -382,8 +382,27 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); + +- /* IB packet must end on a 8 DW boundary */ +- sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); ++ /* Invalidate L2, because if we don't do it, we might get stale cache ++ * lines from previous IBs. ++ */ ++ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); ++ amdgpu_ring_write(ring, 0); ++ amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | ++ SDMA_GCR_GL2_WB | ++ SDMA_GCR_GLM_INV | ++ SDMA_GCR_GLM_WB) << 16); ++ amdgpu_ring_write(ring, 0xffffff80); ++ amdgpu_ring_write(ring, 0xffff); ++ ++ /* An IB packet must end on a 8 DW boundary--the next dword ++ * must be on a 8-dword boundary. Our IB packet below is 6 ++ * dwords long, thus add x number of NOPs, such that, in ++ * modular arithmetic, ++ * wptr + 6 + x = 8k, k >= 0, which in C is, ++ * (wptr + 6 + x) % 8 = 0. ++ * The expression below, is a solution of x. ++ */ ++ sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); +@@ -1086,10 +1105,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib, + } + + /** +- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw +- * ++ * sdma_v5_0_ring_pad_ib - pad the IB + * @ib: indirect buffer to fill with padding + * ++ * Pad the IB with NOPs to a boundary multiple of 8. 
+ */ + static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) + { +@@ -1097,7 +1116,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib + u32 pad_count; + int i; + +- pad_count = (8 - (ib->length_dw & 0x7)) % 8; ++ pad_count = (-ib->length_dw) & 0x7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = +@@ -1600,7 +1619,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ +- .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ ++ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ + .emit_ib = sdma_v5_0_ring_emit_ib, + .emit_fence = sdma_v5_0_ring_emit_fence, + .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index be61ae1430ed..99906435dcf7 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -6921,6 +6921,7 @@ static int dm_update_plane_state(struct dc *dc, + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; + struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; ++ struct amdgpu_crtc *new_acrtc; + bool needs_reset; + int ret = 0; + +@@ -6930,9 +6931,30 @@ static int dm_update_plane_state(struct dc *dc, + dm_new_plane_state = to_dm_plane_state(new_plane_state); + dm_old_plane_state = to_dm_plane_state(old_plane_state); + +- /*TODO Implement atomic check for cursor plane */ +- if (plane->type == DRM_PLANE_TYPE_CURSOR) ++ /*TODO Implement better atomic check for cursor plane */ ++ if (plane->type == DRM_PLANE_TYPE_CURSOR) { ++ if (!enable || !new_plane_crtc || ++ drm_atomic_plane_disabling(plane->state, new_plane_state)) ++ return 0; ++ ++ new_acrtc = to_amdgpu_crtc(new_plane_crtc); ++ ++ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) || ++ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) { ++ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n", ++ new_plane_state->crtc_w, new_plane_state->crtc_h); ++ return -EINVAL; ++ } ++ ++ if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || ++ new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) { ++ DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", ++ new_plane_state->crtc_x, new_plane_state->crtc_y); ++ return -EINVAL; ++ } ++ + return 0; ++ } + + needs_reset = should_reset_plane(state, plane, old_plane_state, + new_plane_state); +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +index e933f6a369f9..083c42e521f5 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +@@ -2015,7 +2015,8 @@ static void dcn20_fpga_init_hw(struct dc *dc) + + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); +- REG_WRITE(REFCLK_CNTL, 0); ++ if (REG(REFCLK_CNTL)) ++ REG_WRITE(REFCLK_CNTL, 0); + // + + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index 161bf7caf3ae..bb7add5ea227 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -247,7 +247,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { + .dram_channel_width_bytes = 4, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .dcn_downspread_percent = 0.5, +- .downspread_percent = 0.5, ++ .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, +diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +index d306cc711997..8bb5fbef7de0 100644 +--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c ++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +@@ -1425,7 +1425,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) + if (!hwmgr) + return -EINVAL; + +- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) ++ if (!(hwmgr->not_vf && amdgpu_dpm) || ++ !hwmgr->hwmgr_func->get_asic_baco_capability) + return 0; + + mutex_lock(&hwmgr->smu_lock); +@@ -1459,7 +1460,8 @@ static int pp_set_asic_baco_state(void *handle, int state) + if (!hwmgr) + return -EINVAL; + +- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) ++ if (!(hwmgr->not_vf && amdgpu_dpm) || ++ !hwmgr->hwmgr_func->set_asic_baco_state) + return 0; + + mutex_lock(&hwmgr->smu_lock); +diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c +index 07a038f21619..caf6166622e4 100644 +--- a/drivers/gpu/drm/i915/display/intel_fbc.c ++++ b/drivers/gpu/drm/i915/display/intel_fbc.c +@@ -504,8 +504,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) + if (!ret) + goto err_llb; + else if (ret > 1) { +- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); +- ++ DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; +diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c +index 6c79d16b381e..058dcd541644 100644 +--- a/drivers/gpu/drm/i915/gvt/scheduler.c ++++ b/drivers/gpu/drm/i915/gvt/scheduler.c +@@ -374,7 +374,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { + struct i915_page_directory * const pd = + i915_pd_entry(ppgtt->pd, i); +- ++ /* skip now as current i915 ppgtt alloc won't allocate ++ top level pdp for non 4-level table, won't impact ++ shadow ppgtt. 
*/ ++ if (!pd) ++ break; + px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; + } + } +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 3ccfc025fde2..ade607d93e45 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -4784,7 +4784,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + * WaIncreaseLatencyIPCEnabled: kbl,cfl + * Display WA #1141: kbl,cfl + */ +- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) || ++ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && + dev_priv->ipc_enabled) + latency += 4; + +diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c +index 43688ecdd8a0..60ab7151b84d 100644 +--- a/drivers/gpu/drm/qxl/qxl_image.c ++++ b/drivers/gpu/drm/qxl/qxl_image.c +@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev, + break; + default: + DRM_ERROR("unsupported image bit depth\n"); +- return -EINVAL; /* TODO: cleanup */ ++ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); ++ return -EINVAL; + } + image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; + image->u.bitmap.x = width; +diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +index f83522717488..4f944ace665d 100644 +--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c ++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +@@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder) + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder); + struct mipi_dsi_device *device = dsi->device; +- union phy_configure_opts opts = { 0 }; ++ union phy_configure_opts opts = { }; + struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy; + u16 delay; + +diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c +index 53b517dbe7e6..4af2fc309c28 100644 +--- a/drivers/hwmon/da9052-hwmon.c ++++ b/drivers/hwmon/da9052-hwmon.c +@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev, + int channel = to_sensor_dev_attr(devattr)->index; + int ret; + +- mutex_lock(&hwmon->hwmon_lock); ++ mutex_lock(&hwmon->da9052->auxadc_lock); + ret = __da9052_read_tsi(dev, channel); +- mutex_unlock(&hwmon->hwmon_lock); ++ mutex_unlock(&hwmon->da9052->auxadc_lock); + + if (ret < 0) + return ret; +diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c +index 65b10efca2b8..7affe6b4ae21 100644 +--- a/drivers/infiniband/core/cache.c ++++ b/drivers/infiniband/core/cache.c +@@ -1542,8 +1542,11 @@ int ib_cache_setup_one(struct ib_device *device) + if (err) + return err; + +- rdma_for_each_port (device, p) +- ib_cache_update(device, p, true); ++ rdma_for_each_port (device, p) { ++ err = ib_cache_update(device, p, true); ++ if (err) ++ return err; ++ } + + return 0; + } +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c +index ef4b0c7061e4..244ebf285fc3 100644 +--- a/drivers/infiniband/core/nldev.c ++++ b/drivers/infiniband/core/nldev.c +@@ -1248,10 +1248,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + + has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); + ret = fe->fill_res_func(msg, has_cap_net_admin, res, port); +- rdma_restrack_put(res); + if (ret) + goto err_free; + ++ rdma_restrack_put(res); + nlmsg_end(msg, nlh); + ib_device_put(device); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c 
+index d82e0589cfd2..6b4e7235d2f5 100644 +--- a/drivers/infiniband/hw/cxgb4/cm.c ++++ b/drivers/infiniband/hw/cxgb4/cm.c +@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) + srqidx = ABORT_RSS_SRQIDX_G( + be32_to_cpu(req->srqidx_status)); + if (srqidx) { +- complete_cached_srq_buffers(ep, +- req->srqidx_status); ++ complete_cached_srq_buffers(ep, srqidx); + } else { + /* Hold ep ref until finish_peer_abort() */ + c4iw_get_ep(&ep->com); +@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) + return 0; + } + +- ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W, +- TCB_RQ_START_S); ++ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, ++ TCB_RQ_START_S); + cleanup: + pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); + +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c +index 13e4203497b3..a92346e88628 100644 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c +@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + + set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); + pq->state = SDMA_PKT_Q_ACTIVE; +- /* Send the first N packets in the request to buy us some time */ +- ret = user_sdma_send_pkts(req, pcount); +- if (unlikely(ret < 0 && ret != -EBUSY)) +- goto free_req; + + /* + * This is a somewhat blocking send implementation. +diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c +index 55a1fbf0e670..ae8b97c30665 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c +@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev, + int arp_index; + + arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action); +- if (arp_index == -1) ++ if (arp_index < 0) + return; + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c +index bd4aa04416c6..6e2b3e2f83f1 100644 +--- a/drivers/infiniband/hw/mlx4/qp.c ++++ b/drivers/infiniband/hw/mlx4/qp.c +@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, + int send_size; + int header_size; + int spc; ++ int err; + int i; + + if (wr->wr.opcode != IB_WR_SEND) +@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, + + sqp->ud_header.lrh.virtual_lane = 0; + sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); +- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); ++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); ++ if (err) ++ return err; + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); + if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) + sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); +@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr, + } + sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); + if (!sqp->qp.ibqp.qp_num) +- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); ++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, ++ &pkey); + else +- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); ++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, ++ &pkey); ++ if (err) ++ return err; ++ + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); + sqp->ud_header.bth.destination_qpn = 
cpu_to_be32(wr->remote_qpn); + sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); +diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c +index 48f48122ddcb..6a413d73b95d 100644 +--- a/drivers/infiniband/sw/rxe/rxe_mmap.c ++++ b/drivers/infiniband/sw/rxe/rxe_mmap.c +@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, + + ip = kmalloc(sizeof(*ip), GFP_KERNEL); + if (!ip) +- return NULL; ++ return ERR_PTR(-ENOMEM); + + size = PAGE_ALIGN(size); + +diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c +index ff92704de32f..245040c3a35d 100644 +--- a/drivers/infiniband/sw/rxe/rxe_queue.c ++++ b/drivers/infiniband/sw/rxe/rxe_queue.c +@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, + + if (outbuf) { + ip = rxe_create_mmap_info(rxe, buf_size, udata, buf); +- if (!ip) ++ if (IS_ERR(ip)) { ++ err = PTR_ERR(ip); + goto err1; ++ } + +- err = copy_to_user(outbuf, &ip->info, sizeof(ip->info)); +- if (err) ++ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) { ++ err = -EFAULT; + goto err2; ++ } + + spin_lock_bh(&rxe->pending_lock); + list_add(&ip->pending_mmaps, &rxe->pending_mmaps); +@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, + err2: + kfree(ip); + err1: +- return -EINVAL; ++ return err; + } + + inline void rxe_queue_reset(struct rxe_queue *q) +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c +index 95b41c0891d0..9d01b5dca519 100644 +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -1417,6 +1417,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) + struct mmc_request *mrq = &mqrq->brq.mrq; + struct request_queue *q = req->q; + struct mmc_host *host = mq->card->host; ++ enum mmc_issue_type issue_type = mmc_issue_type(mq, req); + unsigned long flags; + bool put_card; + int err; +@@ -1446,7 +1447,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) + + spin_lock_irqsave(&mq->lock, flags); + +- mq->in_flight[mmc_issue_type(mq, req)] -= 1; ++ mq->in_flight[issue_type] -= 1; + + put_card = (mmc_tot_in_flight(mq) == 0); + +diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c +index 9edc08685e86..9c0ccb3744c2 100644 +--- a/drivers/mmc/core/queue.c ++++ b/drivers/mmc/core/queue.c +@@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) + case MMC_ISSUE_DCMD: + if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) { + if (recovery_needed) +- __mmc_cqe_recovery_notifier(mq); ++ mmc_cqe_recovery_notifier(mrq); + return BLK_EH_RESET_TIMER; + } +- /* No timeout (XXX: huh? comment doesn't make much sense) */ +- blk_mq_complete_request(req); ++ /* The request has gone already */ + return BLK_EH_DONE; + default: + /* Timeout is handled by mmc core */ +@@ -125,18 +124,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + unsigned long flags; +- int ret; ++ bool ignore_tout; + + spin_lock_irqsave(&mq->lock, flags); +- +- if (mq->recovery_needed || !mq->use_cqe) +- ret = BLK_EH_RESET_TIMER; +- else +- ret = mmc_cqe_timed_out(req); +- ++ ignore_tout = mq->recovery_needed || !mq->use_cqe; + spin_unlock_irqrestore(&mq->lock, flags); + +- return ret; ++ return ignore_tout ? 
BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req); + } + + static void mmc_mq_recovery_handler(struct work_struct *work) +diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c +index 1aee485d56d4..026ca9194ce5 100644 +--- a/drivers/mmc/host/alcor.c ++++ b/drivers/mmc/host/alcor.c +@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) + + if (ret) { + dev_err(&pdev->dev, "Failed to get irq for data line\n"); +- return ret; ++ goto free_host; + } + + mutex_init(&host->cmd_mutex); +@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, host); + mmc_add_host(mmc); + return 0; ++ ++free_host: ++ mmc_free_host(mmc); ++ return ret; + } + + static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev) +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c +index 1604f512c7bd..01fc437ed965 100644 +--- a/drivers/mmc/host/sdhci-acpi.c ++++ b/drivers/mmc/host/sdhci-acpi.c +@@ -602,10 +602,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, + } + + static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { +- .chip = &sdhci_acpi_chip_amd, +- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, +- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | +- SDHCI_QUIRK_32BIT_ADMA_SIZE, ++ .chip = &sdhci_acpi_chip_amd, ++ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, ++ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | ++ SDHCI_QUIRK_32BIT_DMA_SIZE | ++ SDHCI_QUIRK_32BIT_ADMA_SIZE, ++ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, + .probe_slot = sdhci_acpi_emmc_amd_probe_slot, + }; + +diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c +index ce15a05f23d4..fd76aa672e02 100644 +--- a/drivers/mmc/host/sdhci-pci-gli.c ++++ b/drivers/mmc/host/sdhci-pci-gli.c +@@ -26,6 +26,9 @@ + #define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26) + #define GLI_9750_DRIVING_1_VALUE 0xFFF + #define GLI_9750_DRIVING_2_VALUE 0x3 ++#define SDHCI_GLI_9750_SEL_1 BIT(29) ++#define SDHCI_GLI_9750_SEL_2 BIT(31) ++#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30)) + + #define SDHCI_GLI_9750_PLL 0x864 + #define SDHCI_GLI_9750_PLL_TX2_INV BIT(23) +@@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host) + GLI_9750_DRIVING_1_VALUE); + driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2, + GLI_9750_DRIVING_2_VALUE); ++ driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST); ++ driving_value |= SDHCI_GLI_9750_SEL_2; + sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING); + + sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4; +@@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg) + return value; + } + ++#ifdef CONFIG_PM_SLEEP ++static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) ++{ ++ struct sdhci_pci_slot *slot = chip->slots[0]; ++ ++ pci_free_irq_vectors(slot->chip->pdev); ++ gli_pcie_enable_msi(slot); ++ ++ return sdhci_pci_resume_host(chip); ++} ++#endif ++ + static const struct sdhci_ops sdhci_gl9755_ops = { + .set_clock = sdhci_set_clock, + .enable_dma = sdhci_pci_enable_dma, +@@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = { + .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, + .probe_slot = gli_probe_slot_gl9755, + .ops = &sdhci_gl9755_ops, ++#ifdef CONFIG_PM_SLEEP ++ .resume = sdhci_pci_gli_resume, ++#endif + }; + + static const struct sdhci_ops sdhci_gl9750_ops = { +@@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = { + .quirks2 = 
SDHCI_QUIRK2_BROKEN_DDR50, + .probe_slot = gli_probe_slot_gl9750, + .ops = &sdhci_gl9750_ops, ++#ifdef CONFIG_PM_SLEEP ++ .resume = sdhci_pci_gli_resume, ++#endif + }; +diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c +index 925ed135a4d9..0df6c2b9484a 100644 +--- a/drivers/net/dsa/dsa_loop.c ++++ b/drivers/net/dsa/dsa_loop.c +@@ -356,6 +356,7 @@ static void __exit dsa_loop_exit(void) + } + module_exit(dsa_loop_exit); + ++MODULE_SOFTDEP("pre: dsa_loop_bdinfo"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Florian Fainelli"); + MODULE_DESCRIPTION("DSA loopback driver"); +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +index bf5add954181..a935b20effa3 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + addr = dpaa2_sg_get_addr(&sgt[i]); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); +- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ dma_unmap_page(dev, addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + + free_pages((unsigned long)sg_vaddr, 0); +@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, + /* Get the address and length from the S/G entry */ + sg_addr = dpaa2_sg_get_addr(sge); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); +- dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, ++ dma_unmap_page(dev, sg_addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + + sg_length = dpaa2_sg_get_len(sge); +@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, + (page_address(page) - page_address(head_page)); + + skb_add_rx_frag(skb, i - 1, head_page, page_offset, +- sg_length, DPAA2_ETH_RX_BUF_SIZE); ++ sg_length, priv->rx_buf_size); + } + + if (dpaa2_sg_is_final(sge)) +@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); +- dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, ++ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); + } +@@ -331,7 +331,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, + break; + case XDP_REDIRECT: + dma_unmap_page(priv->net_dev->dev.parent, addr, +- DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); ++ priv->rx_buf_size, DMA_BIDIRECTIONAL); + ch->buf_count--; + xdp.data_hard_start = vaddr; + err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); +@@ -370,7 +370,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + trace_dpaa2_rx_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); +- dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + + fas = dpaa2_get_fas(vaddr, false); +@@ -389,13 +389,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + return; + } + +- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ dma_unmap_page(dev, addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + skb = build_linear_skb(ch, fd, vaddr); + } else if (fd_format == dpaa2_fd_sg) { + WARN_ON(priv->xdp_prog); + +- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ dma_unmap_page(dev, addr, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + skb = build_frag_skb(priv, ch, buf_data); + free_pages((unsigned long)vaddr, 
0); +@@ -963,7 +963,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, + if (!page) + goto err_alloc; + +- addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, ++ addr = dma_map_page(dev, page, 0, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; +@@ -973,7 +973,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + page, DPAA2_ETH_RX_BUF_RAW_SIZE, +- addr, DPAA2_ETH_RX_BUF_SIZE, ++ addr, priv->rx_buf_size, + bpid); + } + +@@ -1680,7 +1680,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) + int mfl, linear_mfl; + + mfl = DPAA2_ETH_L2_MAX_FRM(mtu); +- linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - ++ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - + dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; + + if (mfl > linear_mfl) { +@@ -2432,6 +2432,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) + else + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + ++ /* We need to ensure that the buffer size seen by WRIOP is a multiple ++ * of 64 or 256 bytes depending on the WRIOP version. ++ */ ++ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); ++ + /* tx buffer */ + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; + buf_layout.pass_timestamp = true; +@@ -3096,7 +3101,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) + pools_params.num_dpbp = 1; + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].backup_pool = 0; +- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; ++ pools_params.pools[0].buffer_size = priv->rx_buf_size; + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + dev_err(dev, "dpni_set_pools() failed\n"); +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +index 8a0e65b3267f..4570ed53c6c7 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +@@ -373,6 +373,7 @@ struct dpaa2_eth_priv { + u16 tx_data_offset; + + struct fsl_mc_device *dpbp_dev; ++ u16 rx_buf_size; + u16 bpid; + struct iommu_domain *iommu_domain; + +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +index dc9a6c36cac0..e4d9fb0e72bf 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +@@ -590,7 +590,7 @@ static int num_rules(struct dpaa2_eth_priv *priv) + + static int update_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *new_fs, +- int location) ++ unsigned int location) + { + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_cls_rule *rule; +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +index 8995e32dd1c0..992908e6eebf 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +@@ -45,6 +45,8 @@ + + #define MGMT_MSG_TIMEOUT 5000 + ++#define SET_FUNC_PORT_MGMT_TIMEOUT 25000 ++ + #define mgmt_to_pfhwdev(pf_mgmt) \ + container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) + +@@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + u8 *buf_in, u16 in_size, + u8 *buf_out, u16 *out_size, + enum mgmt_direction_type direction, +- u16 resp_msg_id) ++ u16 resp_msg_id, u32 timeout) + { + struct 
hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_recv_msg *recv_msg; + struct completion *recv_done; ++ unsigned long timeo; + u16 msg_id; + int err; + +@@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + goto unlock_sync_msg; + } + +- if (!wait_for_completion_timeout(recv_done, +- msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { ++ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); ++ ++ if (!wait_for_completion_timeout(recv_done, timeo)) { + dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + err = -ETIMEDOUT; + goto unlock_sync_msg; +@@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + { + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; ++ u32 timeout = 0; + + if (sync != HINIC_MGMT_MSG_SYNC) { + dev_err(&pdev->dev, "Invalid MGMT msg type\n"); +@@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + return -EINVAL; + } + ++ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) ++ timeout = SET_FUNC_PORT_MGMT_TIMEOUT; ++ + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + buf_out, out_size, MGMT_DIRECT_SEND, +- MSG_NOT_RESP); ++ MSG_NOT_RESP, timeout); + } + + /** +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c +index 42d00b049c6e..3f739ce40201 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c +@@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev) + { + struct hinic_dev *nic_dev = netdev_priv(netdev); + unsigned int flags; +- int err; + + down(&nic_dev->mgmt_lock); + +@@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev) + + up(&nic_dev->mgmt_lock); + +- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); +- if (err) { +- netif_err(nic_dev, drv, netdev, +- "Failed to set func port state\n"); +- nic_dev->flags |= (flags & HINIC_INTF_UP); +- return err; +- } ++ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + +- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); +- if (err) { +- netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); +- nic_dev->flags |= (flags & HINIC_INTF_UP); +- return err; +- } ++ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + + if (nic_dev->flags & HINIC_RSS_ENABLE) { + hinic_rss_deinit(nic_dev); +diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c +index e1651756bf9d..f70bb81e1ed6 100644 +--- a/drivers/net/ethernet/moxa/moxart_ether.c ++++ b/drivers/net/ethernet/moxa/moxart_ether.c +@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev) + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); +- free_irq(ndev->irq, ndev); ++ devm_free_irq(&pdev->dev, ndev->irq, ndev); + moxart_mac_free_memory(ndev); + free_netdev(ndev); + +diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c +index 51fa82b429a3..40970352d208 100644 +--- a/drivers/net/ethernet/natsemi/jazzsonic.c ++++ b/drivers/net/ethernet/natsemi/jazzsonic.c +@@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev) + + err = register_netdev(dev); + if (err) +- goto out1; ++ goto undo_probe1; + + return 0; + +-out1: ++undo_probe1: ++ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), ++ lp->descriptors, lp->descriptors_laddr); + 
release_mem_region(dev->base_addr, SONIC_MEM_SIZE); + out: + free_netdev(dev); +diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c +index 354efffac0f9..bdbf0726145e 100644 +--- a/drivers/net/ethernet/netronome/nfp/abm/main.c ++++ b/drivers/net/ethernet/netronome/nfp/abm/main.c +@@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) + goto err_free_alink; + + alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL); +- if (!alink->prio_map) ++ if (!alink->prio_map) { ++ err = -ENOMEM; + goto err_free_alink; ++ } + + /* This is a multi-host app, make sure MAC/PHY is up, but don't + * make the MAC/PHY state follow the state of any of the ports. +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 3bc6d1ef29ec..6fa9852e3f97 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -2202,6 +2202,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp) + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, ++ /* RTL8401, reportedly works if treated as RTL8101e */ ++ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +index e0a5fe83d8e0..bfc4a92f1d92 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +@@ -75,6 +75,11 @@ struct ethqos_emac_por { + unsigned int value; + }; + ++struct ethqos_emac_driver_data { ++ const struct ethqos_emac_por *por; ++ unsigned int num_por; ++}; ++ + struct qcom_ethqos { + struct platform_device *pdev; + void __iomem *rgmii_base; +@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = { + { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 }, + }; + ++static const struct ethqos_emac_driver_data emac_v2_3_0_data = { ++ .por = emac_v2_3_0_por, ++ .num_por = ARRAY_SIZE(emac_v2_3_0_por), ++}; ++ + static int ethqos_dll_configure(struct qcom_ethqos *ethqos) + { + unsigned int val; +@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) + struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; ++ const struct ethqos_emac_driver_data *data; + struct qcom_ethqos *ethqos; + struct resource *res; + int ret; +@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) + goto err_mem; + } + +- ethqos->por = of_device_get_match_data(&pdev->dev); ++ data = of_device_get_match_data(&pdev->dev); ++ ethqos->por = data->por; ++ ethqos->num_por = data->num_por; + + ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii"); + if (IS_ERR(ethqos->rgmii_clk)) { +@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev) + } + + static const struct of_device_id qcom_ethqos_match[] = { +- { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por}, ++ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data}, + { } + }; + MODULE_DEVICE_TABLE(of, qcom_ethqos_match); +diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c +index 001def4509c2..fed3e395f18e 100644 +--- a/drivers/net/phy/microchip_t1.c ++++ 
b/drivers/net/phy/microchip_t1.c +@@ -3,9 +3,21 @@ + + #include + #include ++#include + #include + #include + ++/* External Register Control Register */ ++#define LAN87XX_EXT_REG_CTL (0x14) ++#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000) ++#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800) ++ ++/* External Register Read Data Register */ ++#define LAN87XX_EXT_REG_RD_DATA (0x15) ++ ++/* External Register Write Data Register */ ++#define LAN87XX_EXT_REG_WR_DATA (0x16) ++ + /* Interrupt Source Register */ + #define LAN87XX_INTERRUPT_SOURCE (0x18) + +@@ -14,9 +26,160 @@ + #define LAN87XX_MASK_LINK_UP (0x0004) + #define LAN87XX_MASK_LINK_DOWN (0x0002) + ++/* phyaccess nested types */ ++#define PHYACC_ATTR_MODE_READ 0 ++#define PHYACC_ATTR_MODE_WRITE 1 ++#define PHYACC_ATTR_MODE_MODIFY 2 ++ ++#define PHYACC_ATTR_BANK_SMI 0 ++#define PHYACC_ATTR_BANK_MISC 1 ++#define PHYACC_ATTR_BANK_PCS 2 ++#define PHYACC_ATTR_BANK_AFE 3 ++#define PHYACC_ATTR_BANK_MAX 7 ++ + #define DRIVER_AUTHOR "Nisar Sayed " + #define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" + ++struct access_ereg_val { ++ u8 mode; ++ u8 bank; ++ u8 offset; ++ u16 val; ++ u16 mask; ++}; ++ ++static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank, ++ u8 offset, u16 val) ++{ ++ u16 ereg = 0; ++ int rc = 0; ++ ++ if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX) ++ return -EINVAL; ++ ++ if (bank == PHYACC_ATTR_BANK_SMI) { ++ if (mode == PHYACC_ATTR_MODE_WRITE) ++ rc = phy_write(phydev, offset, val); ++ else ++ rc = phy_read(phydev, offset); ++ return rc; ++ } ++ ++ if (mode == PHYACC_ATTR_MODE_WRITE) { ++ ereg = LAN87XX_EXT_REG_CTL_WR_CTL; ++ rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val); ++ if (rc < 0) ++ return rc; ++ } else { ++ ereg = LAN87XX_EXT_REG_CTL_RD_CTL; ++ } ++ ++ ereg |= (bank << 8) | offset; ++ ++ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg); ++ if (rc < 0) ++ return rc; ++ ++ if (mode == PHYACC_ATTR_MODE_READ) ++ rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA); ++ ++ return rc; ++} ++ ++static int access_ereg_modify_changed(struct phy_device *phydev, ++ u8 bank, u8 offset, u16 val, u16 mask) ++{ ++ int new = 0, rc = 0; ++ ++ if (bank > PHYACC_ATTR_BANK_MAX) ++ return -EINVAL; ++ ++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val); ++ if (rc < 0) ++ return rc; ++ ++ new = val | (rc & (mask ^ 0xFFFF)); ++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new); ++ ++ return rc; ++} ++ ++static int lan87xx_phy_init(struct phy_device *phydev) ++{ ++ static const struct access_ereg_val init[] = { ++ /* TX Amplitude = 5 */ ++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B, ++ 0x000A, 0x001E}, ++ /* Clear SMI interrupts */ ++ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18, ++ 0, 0}, ++ /* Clear MISC interrupts */ ++ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08, ++ 0, 0}, ++ /* Turn on TC10 Ring Oscillator (ROSC) */ ++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20, ++ 0x0020, 0x0020}, ++ /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */ ++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20, ++ 0x283C, 0}, ++ /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */ ++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21, ++ 0x274F, 0}, ++ /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep, ++ * and Wake_In to wake PHY ++ */ ++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20, ++ 0x80A7, 0}, ++ /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer ++ * to 128 uS ++ */ ++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 
0x24, ++ 0xF110, 0}, ++ /* Enable HW Init */ ++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A, ++ 0x0100, 0x0100}, ++ }; ++ int rc, i; ++ ++ /* Start manual initialization procedures in Managed Mode */ ++ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, ++ 0x1a, 0x0000, 0x0100); ++ if (rc < 0) ++ return rc; ++ ++ /* Soft Reset the SMI block */ ++ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, ++ 0x00, 0x8000, 0x8000); ++ if (rc < 0) ++ return rc; ++ ++ /* Check to see if the self-clearing bit is cleared */ ++ usleep_range(1000, 2000); ++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, ++ PHYACC_ATTR_BANK_SMI, 0x00, 0); ++ if (rc < 0) ++ return rc; ++ if ((rc & 0x8000) != 0) ++ return -ETIMEDOUT; ++ ++ /* PHY Initialization */ ++ for (i = 0; i < ARRAY_SIZE(init); i++) { ++ if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) { ++ rc = access_ereg_modify_changed(phydev, init[i].bank, ++ init[i].offset, ++ init[i].val, ++ init[i].mask); ++ } else { ++ rc = access_ereg(phydev, init[i].mode, init[i].bank, ++ init[i].offset, init[i].val); ++ } ++ if (rc < 0) ++ return rc; ++ } ++ ++ return 0; ++} ++ + static int lan87xx_phy_config_intr(struct phy_device *phydev) + { + int rc, val = 0; +@@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev) + return rc < 0 ? rc : 0; + } + ++static int lan87xx_config_init(struct phy_device *phydev) ++{ ++ int rc = lan87xx_phy_init(phydev); ++ ++ return rc < 0 ? rc : 0; ++} ++ + static struct phy_driver microchip_t1_phy_driver[] = { + { + .phy_id = 0x0007c150, +@@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = { + + .features = PHY_BASIC_T1_FEATURES, + ++ .config_init = lan87xx_config_init, + .config_aneg = genphy_config_aneg, + + .ack_interrupt = lan87xx_phy_ack_interrupt, +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index ea890d802ffe..54e5d4f9622c 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -1160,9 +1160,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) + /* Restart autonegotiation so the new modes get sent to the + * link partner. 
+ */ +- ret = phy_restart_aneg(phydev); +- if (ret < 0) +- return ret; ++ if (phydev->autoneg == AUTONEG_ENABLE) { ++ ret = phy_restart_aneg(phydev); ++ if (ret < 0) ++ return ret; ++ } + } + + return 0; +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c +index a44dd3c8af63..087b01684135 100644 +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -492,6 +492,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, + if (!skb) + goto out; + ++ if (skb->pkt_type != PACKET_HOST) ++ goto abort; ++ + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) + goto abort; + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 5a635f028bdc..030d30603c29 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1231,9 +1231,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, + break; + } while (rq->vq->num_free); + if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { +- u64_stats_update_begin(&rq->stats.syncp); ++ unsigned long flags; ++ ++ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); + rq->stats.kicks++; +- u64_stats_update_end(&rq->stats.syncp); ++ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); + } + + return !oom; +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c +index 606fe216f902..cae7caf5ab28 100644 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c +@@ -1297,6 +1297,7 @@ static const struct gpio_chip byt_gpio_chip = { + .direction_output = byt_gpio_direction_output, + .get = byt_gpio_get, + .set = byt_gpio_set, ++ .set_config = gpiochip_generic_config, + .dbg_show = byt_gpio_dbg_show, + }; + +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c +index 2c419fa5d1c1..8f06445a8e39 100644 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c +@@ -1474,11 +1474,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) + struct chv_pinctrl *pctrl = gpiochip_get_data(gc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long pending; ++ unsigned long flags; + u32 intr_line; + + chained_irq_enter(chip, desc); + ++ raw_spin_lock_irqsave(&chv_lock, flags); + pending = readl(pctrl->regs + CHV_INTSTAT); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); ++ + for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) { + unsigned irq, offset; + +diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +index d936e7aa74c4..7b7736abe9d8 100644 +--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c ++++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +@@ -15,17 +15,18 @@ + + #include "pinctrl-intel.h" + +-#define SPT_PAD_OWN 0x020 +-#define SPT_PADCFGLOCK 0x0a0 +-#define SPT_HOSTSW_OWN 0x0d0 +-#define SPT_GPI_IS 0x100 +-#define SPT_GPI_IE 0x120 ++#define SPT_PAD_OWN 0x020 ++#define SPT_H_PADCFGLOCK 0x090 ++#define SPT_LP_PADCFGLOCK 0x0a0 ++#define SPT_HOSTSW_OWN 0x0d0 ++#define SPT_GPI_IS 0x100 ++#define SPT_GPI_IE 0x120 + + #define SPT_COMMUNITY(b, s, e) \ + { \ + .barno = (b), \ + .padown_offset = SPT_PAD_OWN, \ +- .padcfglock_offset = SPT_PADCFGLOCK, \ ++ .padcfglock_offset = SPT_LP_PADCFGLOCK, \ + .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ + .ie_offset = SPT_GPI_IE, \ +@@ -47,7 +48,7 @@ + { \ + .barno = (b), \ + .padown_offset = SPT_PAD_OWN, \ +- .padcfglock_offset = SPT_PADCFGLOCK, \ ++ 
.padcfglock_offset = SPT_H_PADCFGLOCK, \ + .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ + .ie_offset = SPT_GPI_IE, \ +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index 763da0be10d6..44320322037d 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -688,7 +688,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl, + + pol = msm_readl_intr_cfg(pctrl, g); + pol ^= BIT(g->intr_polarity_bit); +- msm_writel_intr_cfg(val, pctrl, g); ++ msm_writel_intr_cfg(pol, pctrl, g); + + val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit); + intstat = msm_readl_intr_status(pctrl, g); +diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c +index 4fc2056bd227..e615dc240150 100644 +--- a/drivers/s390/net/ism_drv.c ++++ b/drivers/s390/net/ism_drv.c +@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, + ISM_NR_DMBS); +- if (!ism->smcd) ++ if (!ism->smcd) { ++ ret = -ENOMEM; + goto err_resource; ++ } + + ism->smcd->priv = ism; + ret = ism_dev_init(ism); +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 94af30f768f7..9c6bf13daaee 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + hp->flags = input_size; /* structure abuse ... */ + hp->pack_id = old_hdr.pack_id; + hp->usr_ptr = NULL; +- if (__copy_from_user(cmnd, buf, cmd_size)) ++ if (__copy_from_user(cmnd, buf, cmd_size)) { ++ sg_remove_request(sfp, srp); + return -EFAULT; ++ } + /* + * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, + * but is is possible that the app intended SG_DXFER_TO_DEV, because there +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c +index f624cc87cbab..856c34010021 100644 +--- a/drivers/usb/cdns3/gadget.c ++++ b/drivers/usb/cdns3/gadget.c +@@ -2105,7 +2105,7 @@ found: + link_trb = priv_req->trb; + + /* Update ring only if removed request is on pending_req_list list */ +- if (req_on_hw_ring) { ++ if (req_on_hw_ring && link_trb) { + link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + + ((priv_req->end_trb + 1) * TRB_SIZE)); + link_trb->control = (link_trb->control & TRB_CYCLE) | +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 6ca40d135430..e26a6f18f421 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) + { + struct usb_memory *usbm = NULL; + struct usb_dev_state *ps = file->private_data; ++ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); + size_t size = vma->vm_end - vma->vm_start; + void *mem; + unsigned long flags; +@@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) + usbm->vma_use_count = 1; + INIT_LIST_HEAD(&usbm->memlist); + +- if (remap_pfn_range(vma, vma->vm_start, +- virt_to_phys(usbm->mem) >> PAGE_SHIFT, +- size, vma->vm_page_prot) < 0) { +- dec_usb_memory_use_count(usbm, &usbm->vma_use_count); +- return -EAGAIN; ++ if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { ++ if (remap_pfn_range(vma, vma->vm_start, ++ virt_to_phys(usbm->mem) >> PAGE_SHIFT, ++ size, vma->vm_page_prot) < 0) { ++ dec_usb_memory_use_count(usbm, &usbm->vma_use_count); ++ return -EAGAIN; ++ } ++ } else { ++ if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, ++ size)) { ++ 
dec_usb_memory_use_count(usbm, &usbm->vma_use_count); ++ return -EAGAIN; ++ } + } + + vma->vm_flags |= VM_IO; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 847c85430b05..4d3de33885ff 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -38,6 +38,7 @@ + + #define USB_VENDOR_GENESYS_LOGIC 0x05e3 + #define USB_VENDOR_SMSC 0x0424 ++#define USB_PRODUCT_USB5534B 0x5534 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 + #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 + +@@ -5506,8 +5507,11 @@ out_hdev_lock: + } + + static const struct usb_device_id hub_id_table[] = { +- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR ++ | USB_DEVICE_ID_MATCH_PRODUCT ++ | USB_DEVICE_ID_MATCH_INT_CLASS, + .idVendor = USB_VENDOR_SMSC, ++ .idProduct = USB_PRODUCT_USB5534B, + .bInterfaceClass = USB_CLASS_HUB, + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 3d30dec42c81..c30c5b1c478c 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -2480,9 +2480,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, + for_each_sg(sg, s, pending, i) { + trb = &dep->trb_pool[dep->trb_dequeue]; + +- if (trb->ctrl & DWC3_TRB_CTRL_HWO) +- break; +- + req->sg = sg_next(s); + req->num_pending_sgs--; + +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c +index ab9ac48a751a..a7709d126b29 100644 +--- a/drivers/usb/gadget/configfs.c ++++ b/drivers/usb/gadget/configfs.c +@@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, + char *name; + int ret; + ++ if (strlen(page) < len) ++ return -EOVERFLOW; ++ + name = kstrdup(page, GFP_KERNEL); + if (!name) + return -ENOMEM; +diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c +index dd81fd538cb8..a748ed0842e8 100644 +--- a/drivers/usb/gadget/legacy/audio.c ++++ b/drivers/usb/gadget/legacy/audio.c +@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(cdev->gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail; ++ } + usb_otg_descriptor_init(cdev->gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c +index 8d7a556ece30..563363aba48f 100644 +--- a/drivers/usb/gadget/legacy/cdc2.c ++++ b/drivers/usb/gadget/legacy/cdc2.c +@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c +index c61e71ba7045..0f1b45e3abd1 100644 +--- a/drivers/usb/gadget/legacy/ncm.c ++++ b/drivers/usb/gadget/legacy/ncm.c +@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/udc/net2272.c 
b/drivers/usb/gadget/udc/net2272.c +index 247de0faaeb7..5980540a8fff 100644 +--- a/drivers/usb/gadget/udc/net2272.c ++++ b/drivers/usb/gadget/udc/net2272.c +@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev) + err_req: + release_mem_region(base, len); + err: ++ kfree(dev); ++ + return ret; + } + +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 315b4552693c..52c625c02341 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -363,6 +363,7 @@ static int xhci_plat_remove(struct platform_device *dev) + struct clk *reg_clk = xhci->reg_clk; + struct usb_hcd *shared_hcd = xhci->shared_hcd; + ++ pm_runtime_get_sync(&dev->dev); + xhci->xhc_state |= XHCI_STATE_REMOVING; + + usb_remove_hcd(shared_hcd); +@@ -376,8 +377,9 @@ static int xhci_plat_remove(struct platform_device *dev) + clk_disable_unprepare(reg_clk); + usb_put_hcd(hcd); + +- pm_runtime_set_suspended(&dev->dev); + pm_runtime_disable(&dev->dev); ++ pm_runtime_put_noidle(&dev->dev); ++ pm_runtime_set_suspended(&dev->dev); + + return 0; + } +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index a54f8f3234f9..49894541ea9a 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3421,8 +3421,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + /* New sg entry */ + --num_sgs; + sent_len -= block_len; +- if (num_sgs != 0) { +- sg = sg_next(sg); ++ sg = sg_next(sg); ++ if (num_sgs != 0 && sg) { + block_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + addr += sent_len; +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index c8494fa5e19d..4b8632eda2bd 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -2135,8 +2135,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata) + } + } + ++ kref_put(&wdata2->refcount, cifs_writedata_release); + if (rc) { +- kref_put(&wdata2->refcount, cifs_writedata_release); + if (is_retryable_error(rc)) + continue; + i += nr_pages; +diff --git a/fs/exec.c b/fs/exec.c +index fc2870f2aca9..d62cd1d71098 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1274,6 +1274,8 @@ int flush_old_exec(struct linux_binprm * bprm) + */ + set_mm_exe_file(bprm->mm, bprm->file); + ++ would_dump(bprm, bprm->file); ++ + /* + * Release all of the old mmap stuff + */ +@@ -1817,8 +1819,6 @@ static int __do_execve_file(int fd, struct filename *filename, + if (retval < 0) + goto out; + +- would_dump(bprm, bprm->file); +- + retval = exec_binprm(bprm); + if (retval < 0) + goto out; +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c +index f63df54a08c6..adbb8fef2216 100644 +--- a/fs/gfs2/bmap.c ++++ b/fs/gfs2/bmap.c +@@ -528,10 +528,12 @@ lower_metapath: + + /* Advance in metadata tree. 
*/ + (mp->mp_list[hgt])++; +- if (mp->mp_list[hgt] >= sdp->sd_inptrs) { +- if (!hgt) ++ if (hgt) { ++ if (mp->mp_list[hgt] >= sdp->sd_inptrs) ++ goto lower_metapath; ++ } else { ++ if (mp->mp_list[hgt] >= sdp->sd_diptrs) + break; +- goto lower_metapath; + } + + fill_up_metapath: +@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, + ret = -ENOENT; + goto unlock; + } else { +- /* report a hole */ + iomap->offset = pos; + iomap->length = length; +- goto do_alloc; ++ goto hole_found; + } + } + iomap->length = size; +@@ -933,8 +934,6 @@ unlock: + return ret; + + do_alloc: +- iomap->addr = IOMAP_NULL_ADDR; +- iomap->type = IOMAP_HOLE; + if (flags & IOMAP_REPORT) { + if (pos >= size) + ret = -ENOENT; +@@ -956,6 +955,9 @@ do_alloc: + if (pos < size && height == ip->i_height) + ret = gfs2_hole_size(inode, lblock, len, mp, iomap); + } ++hole_found: ++ iomap->addr = IOMAP_NULL_ADDR; ++ iomap->type = IOMAP_HOLE; + goto out; + } + +diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c +index 7ca84be20cf6..8303b44a5068 100644 +--- a/fs/gfs2/lops.c ++++ b/fs/gfs2/lops.c +@@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, + struct super_block *sb = sdp->sd_vfs; + struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); + +- bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9); ++ bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; + bio_set_dev(bio, sb->s_bdev); + bio->bi_end_io = end_io; + bio->bi_private = sdp; +@@ -504,7 +504,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, + unsigned int bsize = sdp->sd_sb.sb_bsize, off; + unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; + unsigned int shift = PAGE_SHIFT - bsize_shift; +- unsigned int readahead_blocks = BIO_MAX_PAGES << shift; ++ unsigned int max_bio_size = 2 * 1024 * 1024; + struct gfs2_journal_extent *je; + int sz, ret = 0; + struct bio *bio = NULL; +@@ -532,12 +532,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, + off = 0; + } + +- if (!bio || (bio_chained && !off)) { ++ if (!bio || (bio_chained && !off) || ++ bio->bi_iter.bi_size >= max_bio_size) { + /* start new bio */ + } else { +- sz = bio_add_page(bio, page, bsize, off); +- if (sz == bsize) +- goto block_added; ++ sector_t sector = dblock << sdp->sd_fsb2bb_shift; ++ ++ if (bio_end_sector(bio) == sector) { ++ sz = bio_add_page(bio, page, bsize, off); ++ if (sz == bsize) ++ goto block_added; ++ } + if (off) { + unsigned int blocks = + (PAGE_SIZE - off) >> bsize_shift; +@@ -563,7 +568,7 @@ block_added: + off += bsize; + if (off == PAGE_SIZE) + page = NULL; +- if (blocks_submitted < blocks_read + readahead_blocks) { ++ if (blocks_submitted < 2 * max_bio_size >> bsize_shift) { + /* Keep at least one bio in flight */ + continue; + } +diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c +index 15f271401dcc..573b1da9342c 100644 +--- a/fs/nfs/fscache-index.c ++++ b/fs/nfs/fscache-index.c +@@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data, + return FSCACHE_CHECKAUX_OBSOLETE; + + memset(&auxdata, 0, sizeof(auxdata)); +- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); +- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); ++ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec; ++ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec; ++ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec; ++ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec; + + if 
(NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) + auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); +diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c +index a6dcc2151e77..7d6721ec31d4 100644 +--- a/fs/nfs/fscache.c ++++ b/fs/nfs/fscache.c +@@ -188,7 +188,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int + /* create a cache index for looking up filehandles */ + nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache, + &nfs_fscache_super_index_def, +- key, sizeof(*key) + ulen, ++ &key->key, ++ sizeof(key->key) + ulen, + NULL, 0, + nfss, 0, true); + dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n", +@@ -226,6 +227,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) + } + } + ++static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata, ++ struct nfs_inode *nfsi) ++{ ++ memset(auxdata, 0, sizeof(*auxdata)); ++ auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec; ++ auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec; ++ auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec; ++ auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec; ++ ++ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) ++ auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); ++} ++ + /* + * Initialise the per-inode cache cookie pointer for an NFS inode. + */ +@@ -239,12 +253,7 @@ void nfs_fscache_init_inode(struct inode *inode) + if (!(nfss->fscache && S_ISREG(inode->i_mode))) + return; + +- memset(&auxdata, 0, sizeof(auxdata)); +- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); +- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); +- +- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) +- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); ++ nfs_fscache_update_auxdata(&auxdata, nfsi); + + nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache, + &nfs_fscache_inode_object_def, +@@ -264,9 +273,7 @@ void nfs_fscache_clear_inode(struct inode *inode) + + dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie); + +- memset(&auxdata, 0, sizeof(auxdata)); +- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); +- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); ++ nfs_fscache_update_auxdata(&auxdata, nfsi); + fscache_relinquish_cookie(cookie, &auxdata, false); + nfsi->fscache = NULL; + } +@@ -306,9 +313,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp) + if (!fscache_cookie_valid(cookie)) + return; + +- memset(&auxdata, 0, sizeof(auxdata)); +- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); +- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); ++ nfs_fscache_update_auxdata(&auxdata, nfsi); + + if (inode_is_open_for_write(inode)) { + dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); +diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h +index ad041cfbf9ec..6754c8607230 100644 +--- a/fs/nfs/fscache.h ++++ b/fs/nfs/fscache.h +@@ -62,9 +62,11 @@ struct nfs_fscache_key { + * cache object. 
+ */ + struct nfs_fscache_inode_auxdata { +- struct timespec mtime; +- struct timespec ctime; +- u64 change_attr; ++ s64 mtime_sec; ++ s64 mtime_nsec; ++ s64 ctime_sec; ++ s64 ctime_nsec; ++ u64 change_attr; + }; + + /* +diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c +index cb7c10e9721e..a2593b787cc7 100644 +--- a/fs/nfs/mount_clnt.c ++++ b/fs/nfs/mount_clnt.c +@@ -32,6 +32,7 @@ + #define MNT_fhs_status_sz (1) + #define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE) + #define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE)) ++#define MNT_fhandlev3_sz XDR_QUADLEN(NFS3_FHSIZE) + #define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS) + + /* +@@ -39,7 +40,7 @@ + */ + #define MNT_enc_dirpath_sz encode_dirpath_sz + #define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz) +-#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \ ++#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandlev3_sz + \ + MNT_authflav3_sz) + + /* +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index b53bcf40e2a7..ea680f619438 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -733,9 +733,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) + state = new; + state->owner = owner; + atomic_inc(&owner->so_count); +- list_add_rcu(&state->inode_states, &nfsi->open_states); + ihold(inode); + state->inode = inode; ++ list_add_rcu(&state->inode_states, &nfsi->open_states); + spin_unlock(&inode->i_lock); + /* Note: The reclaim code dictates that we add stateless + * and read-only stateids to the end of the list */ +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index f5d30573f4a9..deb13f0a0f7d 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -171,6 +171,13 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, + if (!fsnotify_iter_should_report_type(iter_info, type)) + continue; + mark = iter_info->marks[type]; ++ /* ++ * If the event is on dir and this mark doesn't care about ++ * events on dir, don't send it! ++ */ ++ if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR)) ++ continue; ++ + /* + * If the event is for a child and this mark doesn't care about + * events on a child, don't send it! +@@ -203,10 +210,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, + user_mask &= ~FAN_ONDIR; + } + +- if (event_mask & FS_ISDIR && +- !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) +- return 0; +- + return test_mask & user_mask; + } + +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 034b0a644efc..448c91bf543b 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off) + /* &a[0] degrades to a pointer: a different type from an array */ + #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) + ++/* ++ * This is needed in functions which generate the stack canary, see ++ * arch/x86/kernel/smpboot.c::start_secondary() for an example. 
++ */ ++#define prevent_tail_call_optimization() mb() ++ + #endif /* __LINUX_COMPILER_H */ +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 06668379109e..5bd384dbdca5 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -978,7 +978,7 @@ struct file_handle { + __u32 handle_bytes; + int handle_type; + /* file identifier */ +- unsigned char f_handle[0]; ++ unsigned char f_handle[]; + }; + + static inline struct file *get_file(struct file *f) +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h +index 8faca7b52543..fb5b2a41bd45 100644 +--- a/include/linux/memcontrol.h ++++ b/include/linux/memcontrol.h +@@ -793,6 +793,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg, + atomic_long_inc(&memcg->memory_events[event]); + cgroup_file_notify(&memcg->events_file); + ++ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) ++ break; + if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) + break; + } while ((memcg = parent_mem_cgroup(memcg)) && +diff --git a/include/linux/pnp.h b/include/linux/pnp.h +index 3b12fd28af78..fc4df3ccefc9 100644 +--- a/include/linux/pnp.h ++++ b/include/linux/pnp.h +@@ -220,10 +220,8 @@ struct pnp_card { + #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) + #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) + #define to_pnp_card(n) container_of(n, struct pnp_card, dev) +-#define pnp_for_each_card(card) \ +- for((card) = global_to_pnp_card(pnp_cards.next); \ +- (card) != global_to_pnp_card(&pnp_cards); \ +- (card) = global_to_pnp_card((card)->global_list.next)) ++#define pnp_for_each_card(card) \ ++ list_for_each_entry(card, &pnp_cards, global_list) + + struct pnp_card_link { + struct pnp_card *card; +@@ -276,14 +274,9 @@ struct pnp_dev { + #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) + #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) + #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) +-#define pnp_for_each_dev(dev) \ +- for((dev) = global_to_pnp_dev(pnp_global.next); \ +- (dev) != global_to_pnp_dev(&pnp_global); \ +- (dev) = global_to_pnp_dev((dev)->global_list.next)) +-#define card_for_each_dev(card,dev) \ +- for((dev) = card_to_pnp_dev((card)->devices.next); \ +- (dev) != card_to_pnp_dev(&(card)->devices); \ +- (dev) = card_to_pnp_dev((dev)->card_list.next)) ++#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) ++#define card_for_each_dev(card, dev) \ ++ list_for_each_entry(dev, &(card)->devices, card_list) + #define pnp_dev_name(dev) (dev)->name + + static inline void *pnp_get_drvdata(struct pnp_dev *pdev) +@@ -437,14 +430,10 @@ struct pnp_protocol { + }; + + #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) +-#define protocol_for_each_card(protocol,card) \ +- for((card) = protocol_to_pnp_card((protocol)->cards.next); \ +- (card) != protocol_to_pnp_card(&(protocol)->cards); \ +- (card) = protocol_to_pnp_card((card)->protocol_list.next)) +-#define protocol_for_each_dev(protocol,dev) \ +- for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ +- (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ +- (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) ++#define protocol_for_each_card(protocol, card) \ ++ list_for_each_entry(card, &(protocol)->cards, protocol_list) ++#define protocol_for_each_dev(protocol, dev) \ ++ list_for_each_entry(dev, &(protocol)->devices, protocol_list) + + extern struct bus_type pnp_bus_type; + +diff --git 
a/include/linux/skmsg.h b/include/linux/skmsg.h +index 7eb6a8754f19..a3adbe593505 100644 +--- a/include/linux/skmsg.h ++++ b/include/linux/skmsg.h +@@ -186,6 +186,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, + dst->sg.data[which] = src->sg.data[which]; + dst->sg.data[which].length = size; + dst->sg.size += size; ++ src->sg.size -= size; + src->sg.data[which].length -= size; + src->sg.data[which].offset += size; + } +diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h +index 5ac5db4d295f..d4326d6662a4 100644 +--- a/include/linux/sunrpc/gss_api.h ++++ b/include/linux/sunrpc/gss_api.h +@@ -22,6 +22,7 @@ + struct gss_ctx { + struct gss_api_mech *mech_type; + void *internal_ctx_id; ++ unsigned int slack, align; + }; + + #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) +@@ -67,6 +68,7 @@ u32 gss_wrap( + u32 gss_unwrap( + struct gss_ctx *ctx_id, + int offset, ++ int len, + struct xdr_buf *inbuf); + u32 gss_delete_sec_context( + struct gss_ctx **ctx_id); +@@ -127,6 +129,7 @@ struct gss_api_ops { + u32 (*gss_unwrap)( + struct gss_ctx *ctx_id, + int offset, ++ int len, + struct xdr_buf *buf); + void (*gss_delete_sec_context)( + void *internal_ctx_id); +diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h +index 02c0412e368c..07930bc9ad60 100644 +--- a/include/linux/sunrpc/gss_krb5.h ++++ b/include/linux/sunrpc/gss_krb5.h +@@ -83,7 +83,7 @@ struct gss_krb5_enctype { + u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, + struct page **pages); /* v2 encryption function */ +- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, ++ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, + struct xdr_buf *buf, u32 *headskip, + u32 *tailskip); /* v2 decryption function */ + }; +@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *outbuf, struct page **pages); + + u32 +-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, ++gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, + struct xdr_buf *buf); + + +@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, + struct page **pages); + + u32 +-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, ++gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, + struct xdr_buf *buf, u32 *plainoffset, + u32 *plainlen); + +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h +index f33e5013bdfb..9db6097c22c5 100644 +--- a/include/linux/sunrpc/xdr.h ++++ b/include/linux/sunrpc/xdr.h +@@ -186,6 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) + extern void xdr_shift_buf(struct xdr_buf *, size_t); + extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); + extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); ++extern void xdr_buf_trim(struct xdr_buf *, unsigned int); + extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); + extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); + extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +diff --git a/include/linux/tty.h b/include/linux/tty.h +index bd5fe0e907e8..a99e9b8e4e31 100644 +--- a/include/linux/tty.h ++++ b/include/linux/tty.h +@@ -66,7 +66,7 @@ struct tty_buffer { + int read; + int flags; + /* Data points here */ +- unsigned long data[0]; ++ unsigned long data[]; + }; + + /* Values for .flags field of tty_buffer */ +diff --git 
a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h +index 9f551f3b69c6..90690e37a56f 100644 +--- a/include/net/netfilter/nf_conntrack.h ++++ b/include/net/netfilter/nf_conntrack.h +@@ -87,7 +87,7 @@ struct nf_conn { + struct hlist_node nat_bysource; + #endif + /* all members below initialized via memset */ +- u8 __nfct_init_offset[0]; ++ struct { } __nfct_init_offset; + + /* If we were expected by an expectation, this will be it */ + struct nf_conn *master; +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 9fb7cf1cdf36..3d03756e1069 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -407,6 +407,7 @@ struct tcf_block { + struct mutex lock; + struct list_head chain_list; + u32 index; /* block index for shared blocks */ ++ u32 classid; /* which class this block belongs to */ + refcount_t refcnt; + struct net *net; + struct Qdisc *q; +diff --git a/include/net/tcp.h b/include/net/tcp.h +index cce285f70c8e..7cf1b4972c66 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1401,6 +1401,19 @@ static inline int tcp_full_space(const struct sock *sk) + return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); + } + ++/* We provision sk_rcvbuf around 200% of sk_rcvlowat. ++ * If 87.5 % (7/8) of the space has been consumed, we want to override ++ * SO_RCVLOWAT constraint, since we are receiving skbs with too small ++ * len/truesize ratio. ++ */ ++static inline bool tcp_rmem_pressure(const struct sock *sk) ++{ ++ int rcvbuf = READ_ONCE(sk->sk_rcvbuf); ++ int threshold = rcvbuf - (rcvbuf >> 3); ++ ++ return atomic_read(&sk->sk_rmem_alloc) > threshold; ++} ++ + extern void tcp_openreq_init_rwin(struct request_sock *req, + const struct sock *sk_listener, + const struct dst_entry *dst); +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h +index a36b7227a15a..334842daa904 100644 +--- a/include/sound/rawmidi.h ++++ b/include/sound/rawmidi.h +@@ -61,6 +61,7 @@ struct snd_rawmidi_runtime { + size_t avail_min; /* min avail for wakeup */ + size_t avail; /* max used buffer for wakeup */ + size_t xruns; /* over/underruns counter */ ++ int buffer_ref; /* buffer reference count */ + /* misc */ + spinlock_t lock; + wait_queue_head_t sleep; +diff --git a/init/Kconfig b/init/Kconfig +index 0bffc8fdbf3d..6db3e310a5e4 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -36,22 +36,6 @@ config TOOLS_SUPPORT_RELR + config CC_HAS_ASM_INLINE + def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null) + +-config CC_HAS_WARN_MAYBE_UNINITIALIZED +- def_bool $(cc-option,-Wmaybe-uninitialized) +- help +- GCC >= 4.7 supports this option. +- +-config CC_DISABLE_WARN_MAYBE_UNINITIALIZED +- bool +- depends on CC_HAS_WARN_MAYBE_UNINITIALIZED +- default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9 +- help +- GCC's -Wmaybe-uninitialized is not reliable by definition. +- Lots of false positive warnings are produced in some cases. +- +- If this option is enabled, -Wno-maybe-uninitialzed is passed +- to the compiler to suppress maybe-uninitialized warnings. +- + config CONSTRUCTORS + bool + depends on !UML +@@ -1226,14 +1210,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE + config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" + depends on ARC +- imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. 
+ + config CC_OPTIMIZE_FOR_SIZE + bool "Optimize for size (-Os)" +- imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + Choosing this option will pass "-Os" to your compiler resulting + in a smaller kernel. +diff --git a/init/initramfs.c b/init/initramfs.c +index c47dad0884f7..5feee4f616d5 100644 +--- a/init/initramfs.c ++++ b/init/initramfs.c +@@ -534,7 +534,7 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end) + } + + #ifdef CONFIG_KEXEC_CORE +-static bool kexec_free_initrd(void) ++static bool __init kexec_free_initrd(void) + { + unsigned long crashk_start = (unsigned long)__va(crashk_res.start); + unsigned long crashk_end = (unsigned long)__va(crashk_res.end); +diff --git a/init/main.c b/init/main.c +index 5cbb9fe937e0..8c7d6b8ee6bd 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -782,6 +782,8 @@ asmlinkage __visible void __init start_kernel(void) + + /* Do the rest non-__init'ed, we're now alive */ + arch_call_rest_init(); ++ ++ prevent_tail_call_optimization(); + } + + /* Call all constructor functions linked into the kernel. */ +diff --git a/ipc/util.c b/ipc/util.c +index 594871610d45..1821b6386d3b 100644 +--- a/ipc/util.c ++++ b/ipc/util.c +@@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, + total++; + } + +- *new_pos = pos + 1; ++ ipc = NULL; + if (total >= ids->in_use) +- return NULL; ++ goto out; + + for (; pos < ipc_mni; pos++) { + ipc = idr_find(&ids->ipcs_idr, pos); + if (ipc != NULL) { + rcu_read_lock(); + ipc_lock_object(ipc); +- return ipc; ++ break; + } + } +- +- /* Out of range - return NULL to terminate iteration */ +- return NULL; ++out: ++ *new_pos = pos + 1; ++ return ipc; + } + + static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 14f4a76b44d5..946cfdd3b2cc 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -1146,8 +1146,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) + if (err) + goto free_value; + +- if (copy_to_user(uvalue, value, value_size) != 0) ++ if (copy_to_user(uvalue, value, value_size) != 0) { ++ err = -EFAULT; + goto free_value; ++ } + + err = 0; + +diff --git a/kernel/fork.c b/kernel/fork.c +index 27c0ef30002e..9180f4416dba 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -2412,11 +2412,11 @@ long do_fork(unsigned long clone_flags, + int __user *child_tidptr) + { + struct kernel_clone_args args = { +- .flags = (clone_flags & ~CSIGNAL), ++ .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), + .pidfd = parent_tidptr, + .child_tid = child_tidptr, + .parent_tid = parent_tidptr, +- .exit_signal = (clone_flags & CSIGNAL), ++ .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), + .stack = stack_start, + .stack_size = stack_size, + }; +@@ -2434,8 +2434,9 @@ long do_fork(unsigned long clone_flags, + pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) + { + struct kernel_clone_args args = { +- .flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), +- .exit_signal = (flags & CSIGNAL), ++ .flags = ((lower_32_bits(flags) | CLONE_VM | ++ CLONE_UNTRACED) & ~CSIGNAL), ++ .exit_signal = (lower_32_bits(flags) & CSIGNAL), + .stack = (unsigned long)fn, + .stack_size = (unsigned long)arg, + }; +@@ -2496,11 +2497,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, + #endif + { + struct kernel_clone_args args = { +- .flags = (clone_flags & ~CSIGNAL), ++ .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), + .pidfd 
= parent_tidptr, + .child_tid = child_tidptr, + .parent_tid = parent_tidptr, +- .exit_signal = (clone_flags & CSIGNAL), ++ .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), + .stack = newsp, + .tls = tls, + }; +diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig +index e08527f50d2a..f3f2fc8ad81a 100644 +--- a/kernel/trace/Kconfig ++++ b/kernel/trace/Kconfig +@@ -371,7 +371,6 @@ config PROFILE_ANNOTATED_BRANCHES + config PROFILE_ALL_BRANCHES + bool "Profile all if conditionals" if !FORTIFY_SOURCE + select TRACE_BRANCH_PROFILING +- imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + This tracer profiles all branch conditions. Every if () + taken in the kernel is recorded whether it hit or miss. +diff --git a/kernel/umh.c b/kernel/umh.c +index 11bf5eea474c..3474d6aa55d8 100644 +--- a/kernel/umh.c ++++ b/kernel/umh.c +@@ -475,6 +475,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info) + { + struct umh_info *umh_info = info->data; + ++ /* cleanup if umh_pipe_setup() was successful but exec failed */ ++ if (info->pid && info->retval) { ++ fput(umh_info->pipe_to_umh); ++ fput(umh_info->pipe_from_umh); ++ } ++ + argv_free(info->argv); + umh_info->pid = info->pid; + } +diff --git a/mm/shmem.c b/mm/shmem.c +index e71b15da1985..98802ca76a5c 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2183,7 +2183,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) + struct shmem_inode_info *info = SHMEM_I(inode); + int retval = -ENOMEM; + +- spin_lock_irq(&info->lock); ++ /* ++ * What serializes the accesses to info->flags? ++ * ipc_lock_object() when called from shmctl_do_lock(), ++ * no serialization needed when called from shm_destroy(). ++ */ + if (lock && !(info->flags & VM_LOCKED)) { + if (!user_shm_lock(inode->i_size, user)) + goto out_nomem; +@@ -2198,7 +2202,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) + retval = 0; + + out_nomem: +- spin_unlock_irq(&info->lock); + return retval; + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index 8ad1e8f00958..120b994af31c 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -8595,11 +8595,13 @@ static void netdev_sync_lower_features(struct net_device *upper, + netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", + &feature, lower->name); + lower->wanted_features &= ~feature; +- netdev_update_features(lower); ++ __netdev_update_features(lower); + + if (unlikely(lower->features & feature)) + netdev_WARN(upper, "failed to disable %pNF on %s!\n", + &feature, lower->name); ++ else ++ netdev_features_change(lower); + } + } + } +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c +index 246a258b1fac..af0130039f37 100644 +--- a/net/core/drop_monitor.c ++++ b/net/core/drop_monitor.c +@@ -212,6 +212,7 @@ static void sched_send_work(struct timer_list *t) + static void trace_drop_common(struct sk_buff *skb, void *location) + { + struct net_dm_alert_msg *msg; ++ struct net_dm_drop_point *point; + struct nlmsghdr *nlh; + struct nlattr *nla; + int i; +@@ -230,11 +231,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) + nlh = (struct nlmsghdr *)dskb->data; + nla = genlmsg_data(nlmsg_data(nlh)); + msg = nla_data(nla); ++ point = msg->points; + for (i = 0; i < msg->entries; i++) { +- if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { +- msg->points[i].count++; ++ if (!memcmp(&location, &point->pc, sizeof(void *))) { ++ point->count++; + goto out; + } ++ point++; + } + if (msg->entries == dm_hit_limit) + goto out; +@@ 
-243,8 +246,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location) + */ + __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); + nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); +- memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); +- msg->points[msg->entries].count = 1; ++ memcpy(point->pc, &location, sizeof(void *)); ++ point->count = 1; + msg->entries++; + + if (!timer_pending(&data->send_timer)) { +diff --git a/net/core/filter.c b/net/core/filter.c +index d59dbc88fef5..f1f2304822e3 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, + } + pop = 0; + } else if (pop >= sge->length - a) { +- sge->length = a; + pop -= (sge->length - a); ++ sge->length = a; + } + } + +diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c +index 256b7954b720..8618242c677a 100644 +--- a/net/core/netprio_cgroup.c ++++ b/net/core/netprio_cgroup.c +@@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset) + struct task_struct *p; + struct cgroup_subsys_state *css; + ++ cgroup_sk_alloc_disable(); ++ + cgroup_taskset_for_each(p, css, tset) { + void *v = (void *)(unsigned long)css->cgroup->id; + +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c +index 716d265ba8ca..0f7f38c29579 100644 +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -461,18 +461,12 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) + + err = dsa_port_setup(dp); + if (err) +- goto ports_teardown; ++ continue; + } + } + + return 0; + +-ports_teardown: +- for (i = 0; i < port; i++) +- dsa_port_teardown(&ds->ports[i]); +- +- dsa_switch_teardown(ds); +- + switch_teardown: + for (i = 0; i < device; i++) { + ds = dst->ds[i]; +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index 0bd10a1f477f..a23094b050f8 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -1258,7 +1258,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, + return ret_val; + } + +- secattr->flags |= NETLBL_SECATTR_MLS_CAT; ++ if (secattr->attr.mls.cat) ++ secattr->flags |= NETLBL_SECATTR_MLS_CAT; + } + + return 0; +@@ -1439,7 +1440,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, + return ret_val; + } + +- secattr->flags |= NETLBL_SECATTR_MLS_CAT; ++ if (secattr->attr.mls.cat) ++ secattr->flags |= NETLBL_SECATTR_MLS_CAT; + } + + return 0; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index fe34e9e0912a..558ddf7ab395 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -914,7 +914,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) + /* Check for load limit; set rate_last to the latest sent + * redirect. + */ +- if (peer->rate_tokens == 0 || ++ if (peer->n_redirects == 0 || + time_after(jiffies, + (peer->rate_last + + (ip_rt_redirect_load << peer->n_redirects)))) { +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index e378ff17f8c6..fe3cdeddd097 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -477,9 +477,17 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags) + static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, + int target, struct sock *sk) + { +- return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) || +- (sk->sk_prot->stream_memory_read ? 
+- sk->sk_prot->stream_memory_read(sk) : false); ++ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq); ++ ++ if (avail > 0) { ++ if (avail >= target) ++ return true; ++ if (tcp_rmem_pressure(sk)) ++ return true; ++ } ++ if (sk->sk_prot->stream_memory_read) ++ return sk->sk_prot->stream_memory_read(sk); ++ return false; + } + + /* +@@ -1757,10 +1765,11 @@ static int tcp_zerocopy_receive(struct sock *sk, + + down_read(¤t->mm->mmap_sem); + +- ret = -EINVAL; + vma = find_vma(current->mm, address); +- if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) +- goto out; ++ if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) { ++ up_read(¤t->mm->mmap_sem); ++ return -EINVAL; ++ } + zc->length = min_t(unsigned long, zc->length, vma->vm_end - address); + + tp = tcp_sk(sk); +@@ -2149,13 +2158,15 @@ skip_copy: + tp->urg_data = 0; + tcp_fast_path_check(sk); + } +- if (used + offset < skb->len) +- continue; + + if (TCP_SKB_CB(skb)->has_rxtstamp) { + tcp_update_recv_tstamps(skb, &tss); + cmsg_flags |= 2; + } ++ ++ if (used + offset < skb->len) ++ continue; ++ + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + goto found_fin_ok; + if (!(flags & MSG_PEEK)) +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 8a01428f80c1..69b025408390 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -121,14 +121,17 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + struct sk_psock *psock; + int copied, ret; + ++ if (unlikely(flags & MSG_ERRQUEUE)) ++ return inet_recv_error(sk, msg, len, addr_len); ++ + psock = sk_psock_get(sk); + if (unlikely(!psock)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); +- if (unlikely(flags & MSG_ERRQUEUE)) +- return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue) && +- sk_psock_queue_empty(psock)) ++ sk_psock_queue_empty(psock)) { ++ sk_psock_put(sk, psock); + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); ++ } + lock_sock(sk); + msg_bytes_ready: + copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags); +@@ -200,7 +203,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, + + if (!ret) { + msg->sg.start = i; +- msg->sg.size -= apply_bytes; + sk_psock_queue_msg(psock, tmp); + sk_psock_data_ready(sk, psock); + } else { +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 5af22c9712a6..677facbeed26 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -4751,7 +4751,8 @@ void tcp_data_ready(struct sock *sk) + const struct tcp_sock *tp = tcp_sk(sk); + int avail = tp->rcv_nxt - tp->copied_seq; + +- if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE)) ++ if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && ++ !sock_flag(sk, SOCK_DONE)) + return; + + sk->sk_data_ready(sk); +diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c +index 221c81f85cbf..8d3f66c310db 100644 +--- a/net/ipv6/calipso.c ++++ b/net/ipv6/calipso.c +@@ -1047,7 +1047,8 @@ static int calipso_opt_getattr(const unsigned char *calipso, + goto getattr_return; + } + +- secattr->flags |= NETLBL_SECATTR_MLS_CAT; ++ if (secattr->attr.mls.cat) ++ secattr->flags |= NETLBL_SECATTR_MLS_CAT; + } + + secattr->type = NETLBL_NLTYPE_CALIPSO; +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index c81d8e9e5169..3b4af0a8bca6 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2728,8 +2728,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, + const struct in6_addr *daddr, *saddr; + struct rt6_info *rt6 = (struct 
rt6_info *)dst; + +- if (dst_metric_locked(dst, RTAX_MTU)) +- return; ++ /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU) ++ * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it. ++ * [see also comment in rt6_mtu_change_route()] ++ */ + + if (iph) { + daddr = &iph->daddr; +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 5cd610b547e0..c2ad462f33f1 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -1381,9 +1381,9 @@ __nf_conntrack_alloc(struct net *net, + ct->status = 0; + ct->timeout = 0; + write_pnet(&ct->ct_net, net); +- memset(&ct->__nfct_init_offset[0], 0, ++ memset(&ct->__nfct_init_offset, 0, + offsetof(struct nf_conn, proto) - +- offsetof(struct nf_conn, __nfct_init_offset[0])); ++ offsetof(struct nf_conn, __nfct_init_offset)); + + nf_ct_zone_add(ct, zone); + +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c +index a9f804f7a04a..ee7c29e0a9d7 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe) + (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END); + } + ++static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe) ++{ ++ return !nft_rbtree_interval_end(rbe); ++} ++ + static bool nft_rbtree_equal(const struct nft_set *set, const void *this, + const struct nft_rbtree_elem *interval) + { +@@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set + if (interval && + nft_rbtree_equal(set, this, interval) && + nft_rbtree_interval_end(rbe) && +- !nft_rbtree_interval_end(interval)) ++ nft_rbtree_interval_start(interval)) + continue; + interval = rbe; + } else if (d > 0) +@@ -74,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set + parent = rcu_dereference_raw(parent->rb_left); + continue; + } ++ ++ if (nft_set_elem_expired(&rbe->ext)) ++ return false; ++ + if (nft_rbtree_interval_end(rbe)) { + if (nft_set_is_anonymous(set)) + return false; +@@ -89,7 +98,8 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set + + if (set->flags & NFT_SET_INTERVAL && interval != NULL && + nft_set_elem_active(&interval->ext, genmask) && +- !nft_rbtree_interval_end(interval)) { ++ !nft_set_elem_expired(&interval->ext) && ++ nft_rbtree_interval_start(interval)) { + *ext = &interval->ext; + return true; + } +@@ -149,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, + continue; + } + ++ if (nft_set_elem_expired(&rbe->ext)) ++ return false; ++ + if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) || + (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) == + (flags & NFT_SET_ELEM_INTERVAL_END)) { +@@ -165,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, + + if (set->flags & NFT_SET_INTERVAL && interval != NULL && + nft_set_elem_active(&interval->ext, genmask) && ++ !nft_set_elem_expired(&interval->ext) && + ((!nft_rbtree_interval_end(interval) && + !(flags & NFT_SET_ELEM_INTERVAL_END)) || + (nft_rbtree_interval_end(interval) && +@@ -224,9 +238,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + p = &parent->rb_right; + else { + if (nft_rbtree_interval_end(rbe) && +- !nft_rbtree_interval_end(new)) { ++ nft_rbtree_interval_start(new)) { + p = &parent->rb_left; +- } else if (!nft_rbtree_interval_end(rbe) && ++ 
} else if (nft_rbtree_interval_start(rbe) && + nft_rbtree_interval_end(new)) { + p = &parent->rb_right; + } else if (nft_set_elem_active(&rbe->ext, genmask)) { +@@ -317,10 +331,10 @@ static void *nft_rbtree_deactivate(const struct net *net, + parent = parent->rb_right; + else { + if (nft_rbtree_interval_end(rbe) && +- !nft_rbtree_interval_end(this)) { ++ nft_rbtree_interval_start(this)) { + parent = parent->rb_left; + continue; +- } else if (!nft_rbtree_interval_end(rbe) && ++ } else if (nft_rbtree_interval_start(rbe) && + nft_rbtree_interval_end(this)) { + parent = parent->rb_right; + continue; +@@ -350,6 +364,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, + + if (iter->count < iter->skip) + goto cont; ++ if (nft_set_elem_expired(&rbe->ext)) ++ goto cont; + if (!nft_set_elem_active(&rbe->ext, iter->genmask)) + goto cont; + +diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c +index 409a3ae47ce2..5e1239cef000 100644 +--- a/net/netlabel/netlabel_kapi.c ++++ b/net/netlabel/netlabel_kapi.c +@@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, + if ((off & (BITS_PER_LONG - 1)) != 0) + return -EINVAL; + ++ /* a null catmap is equivalent to an empty one */ ++ if (!catmap) { ++ *offset = (u32)-1; ++ return 0; ++ } ++ + if (off < catmap->startbit) { + off = catmap->startbit; + *offset = off; +diff --git a/net/rds/message.c b/net/rds/message.c +index 50f13f1d4ae0..2d43e13d6dd5 100644 +--- a/net/rds/message.c ++++ b/net/rds/message.c +@@ -308,26 +308,20 @@ out: + /* + * RDS ops use this to grab SG entries from the rm's sg pool. + */ +-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents, +- int *ret) ++struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) + { + struct scatterlist *sg_first = (struct scatterlist *) &rm[1]; + struct scatterlist *sg_ret; + +- if (WARN_ON(!ret)) +- return NULL; +- + if (nents <= 0) { + pr_warn("rds: alloc sgs failed! nents <= 0\n"); +- *ret = -EINVAL; +- return NULL; ++ return ERR_PTR(-EINVAL); + } + + if (rm->m_used_sgs + nents > rm->m_total_sgs) { + pr_warn("rds: alloc sgs failed! 
total %d used %d nents %d\n", + rm->m_total_sgs, rm->m_used_sgs, nents); +- *ret = -ENOMEM; +- return NULL; ++ return ERR_PTR(-ENOMEM); + } + + sg_ret = &sg_first[rm->m_used_sgs]; +@@ -343,7 +337,6 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in + unsigned int i; + int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE); + int extra_bytes = num_sgs * sizeof(struct scatterlist); +- int ret; + + rm = rds_message_alloc(extra_bytes, GFP_NOWAIT); + if (!rm) +@@ -352,10 +345,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in + set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); + rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); + rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); +- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); +- if (!rm->data.op_sg) { ++ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); ++ if (IS_ERR(rm->data.op_sg)) { + rds_message_put(rm); +- return ERR_PTR(ret); ++ return ERR_CAST(rm->data.op_sg); + } + + for (i = 0; i < rm->data.op_nents; ++i) { +diff --git a/net/rds/rdma.c b/net/rds/rdma.c +index 916f5ec373d8..8e10f954a22f 100644 +--- a/net/rds/rdma.c ++++ b/net/rds/rdma.c +@@ -624,9 +624,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, + op->op_active = 1; + op->op_recverr = rs->rs_recverr; + WARN_ON(!nr_pages); +- op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret); +- if (!op->op_sg) ++ op->op_sg = rds_message_alloc_sgs(rm, nr_pages); ++ if (IS_ERR(op->op_sg)) { ++ ret = PTR_ERR(op->op_sg); + goto out_pages; ++ } + + if (op->op_notify || op->op_recverr) { + /* We allocate an uninitialized notifier here, because +@@ -828,9 +830,11 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, + rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); + rm->atomic.op_active = 1; + rm->atomic.op_recverr = rs->rs_recverr; +- rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret); +- if (!rm->atomic.op_sg) ++ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); ++ if (IS_ERR(rm->atomic.op_sg)) { ++ ret = PTR_ERR(rm->atomic.op_sg); + goto err; ++ } + + /* verify 8 byte-aligned */ + if (args->local_addr & 0x7) { +diff --git a/net/rds/rds.h b/net/rds/rds.h +index 53e86911773a..2ac5b5e55901 100644 +--- a/net/rds/rds.h ++++ b/net/rds/rds.h +@@ -849,8 +849,7 @@ rds_conn_connecting(struct rds_connection *conn) + + /* message.c */ + struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp); +-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents, +- int *ret); ++struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents); + int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from, + bool zcopy); + struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len); +diff --git a/net/rds/send.c b/net/rds/send.c +index 82dcd8b84fe7..68e2bdb08fd0 100644 +--- a/net/rds/send.c ++++ b/net/rds/send.c +@@ -1274,9 +1274,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) + + /* Attach data to the rm */ + if (payload_len) { +- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); +- if (!rm->data.op_sg) ++ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); ++ if (IS_ERR(rm->data.op_sg)) { ++ ret = PTR_ERR(rm->data.op_sg); + goto out; ++ } + ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy); + if (ret) + goto out; +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index c2cdd0fc2e70..68c8fc6f535c 100644 +--- a/net/sched/cls_api.c ++++ 
b/net/sched/cls_api.c +@@ -2005,6 +2005,7 @@ replay: + err = PTR_ERR(block); + goto errout; + } ++ block->classid = parent; + + chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; + if (chain_index > TC_ACT_EXT_VAL_MASK) { +@@ -2547,12 +2548,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) + return skb->len; + + parent = tcm->tcm_parent; +- if (!parent) { ++ if (!parent) + q = dev->qdisc; +- parent = q->handle; +- } else { ++ else + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); +- } + if (!q) + goto out; + cops = q->ops->cl_ops; +@@ -2568,6 +2567,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) + block = cops->tcf_block(q, cl, NULL); + if (!block) + goto out; ++ parent = block->classid; + if (tcf_block_shared(block)) + q = NULL; + } +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index ff5fcb3e1208..5fc6c028f89c 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -2030,7 +2030,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, + struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; + struct kvec *head = rqstp->rq_rcv_buf.head; + struct rpc_auth *auth = cred->cr_auth; +- unsigned int savedlen = rcv_buf->len; + u32 offset, opaque_len, maj_stat; + __be32 *p; + +@@ -2041,9 +2040,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, + offset = (u8 *)(p) - (u8 *)head->iov_base; + if (offset + opaque_len > rcv_buf->len) + goto unwrap_failed; +- rcv_buf->len = offset + opaque_len; + +- maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); ++ maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, ++ offset + opaque_len, rcv_buf); + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + if (maj_stat != GSS_S_COMPLETE) +@@ -2057,10 +2056,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, + */ + xdr_init_decode(xdr, rcv_buf, p, rqstp); + +- auth->au_rslack = auth->au_verfsize + 2 + +- XDR_QUADLEN(savedlen - rcv_buf->len); +- auth->au_ralign = auth->au_verfsize + 2 + +- XDR_QUADLEN(savedlen - rcv_buf->len); ++ auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack; ++ auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align; ++ + return 0; + unwrap_failed: + trace_rpcgss_unwrap_failed(task); +diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c +index 6f2d30d7b766..e7180da1fc6a 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c ++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c +@@ -851,8 +851,8 @@ out_err: + } + + u32 +-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, +- u32 *headskip, u32 *tailskip) ++gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, ++ struct xdr_buf *buf, u32 *headskip, u32 *tailskip) + { + struct xdr_buf subbuf; + u32 ret = 0; +@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, + + /* create a segment skipping the header and leaving out the checksum */ + xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, +- (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - ++ (len - offset - GSS_KRB5_TOK_HDR_LEN - + kctx->gk5e->cksumlength)); + + nblocks = (subbuf.len + blocksize - 1) / blocksize; +@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, + goto out_err; + + /* Get the packet's hmac value */ +- ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, ++ 
ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength, + pkt_hmac, kctx->gk5e->cksumlength); + if (ret) + goto out_err; +diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c +index 14a0aff0cd84..683755d95075 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c ++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c +@@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, + } + + static u32 +-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) ++gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len, ++ struct xdr_buf *buf, unsigned int *slack, ++ unsigned int *align) + { + int signalg; + int sealalg; +@@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + u32 conflen = kctx->gk5e->conflen; + int crypt_offset; + u8 *cksumkey; ++ unsigned int saved_len = buf->len; + + dprintk("RPC: gss_unwrap_kerberos\n"); + + ptr = (u8 *)buf->head[0].iov_base + offset; + if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, +- buf->len - offset)) ++ len - offset)) + return GSS_S_DEFECTIVE_TOKEN; + + if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) || +@@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + (!kctx->initiate && direction != 0)) + return GSS_S_BAD_SIG; + ++ buf->len = len; + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { + struct crypto_sync_skcipher *cipher; + int err; +@@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; + memmove(orig_start, data_start, data_len); + buf->head[0].iov_len -= (data_start - orig_start); +- buf->len -= (data_start - orig_start); ++ buf->len = len - (data_start - orig_start); + + if (gss_krb5_remove_padding(buf, blocksize)) + return GSS_S_DEFECTIVE_TOKEN; + ++ /* slack must include room for krb5 padding */ ++ *slack = XDR_QUADLEN(saved_len - buf->len); ++ /* The GSS blob always precedes the RPC message payload */ ++ *align = *slack; + return GSS_S_COMPLETE; + } + +@@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, + } + + static u32 +-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) ++gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len, ++ struct xdr_buf *buf, unsigned int *slack, ++ unsigned int *align) + { + s32 now; + u8 *ptr; +@@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + if (rrc != 0) + rotate_left(offset + 16, buf, rrc); + +- err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, ++ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf, + &headskip, &tailskip); + if (err) + return GSS_S_FAILURE; +@@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + * it against the original + */ + err = read_bytes_from_xdr_buf(buf, +- buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip, ++ len - GSS_KRB5_TOK_HDR_LEN - tailskip, + decrypted_hdr, GSS_KRB5_TOK_HDR_LEN); + if (err) { + dprintk("%s: error %u getting decrypted_hdr\n", __func__, err); +@@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) + * Note that buf->head[0].iov_len may indicate the available + * head buffer space rather than that actually occupied. 
+ */ +- movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); ++ movelen = min_t(unsigned int, buf->head[0].iov_len, len); + movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip; +- if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > +- buf->head[0].iov_len) +- return GSS_S_FAILURE; ++ BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > ++ buf->head[0].iov_len); + memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); + buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; +- buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; ++ buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip; + + /* Trim off the trailing "extra count" and checksum blob */ +- buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip; ++ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip); + ++ *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip); ++ *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip); + return GSS_S_COMPLETE; + } + +@@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, + } + + u32 +-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) ++gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, ++ int len, struct xdr_buf *buf) + { + struct krb5_ctx *kctx = gctx->internal_ctx_id; + +@@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) + case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: +- return gss_unwrap_kerberos_v1(kctx, offset, buf); ++ return gss_unwrap_kerberos_v1(kctx, offset, len, buf, ++ &gctx->slack, &gctx->align); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: +- return gss_unwrap_kerberos_v2(kctx, offset, buf); ++ return gss_unwrap_kerberos_v2(kctx, offset, len, buf, ++ &gctx->slack, &gctx->align); + } + } +diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c +index 82060099a429..8fa924c8e282 100644 +--- a/net/sunrpc/auth_gss/gss_mech_switch.c ++++ b/net/sunrpc/auth_gss/gss_mech_switch.c +@@ -438,10 +438,11 @@ gss_wrap(struct gss_ctx *ctx_id, + u32 + gss_unwrap(struct gss_ctx *ctx_id, + int offset, ++ int len, + struct xdr_buf *buf) + { + return ctx_id->mech_type->gm_ops +- ->gss_unwrap(ctx_id, offset, buf); ++ ->gss_unwrap(ctx_id, offset, len, buf); + } + + +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index ed20fa8a6f70..d9f7439e2431 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -897,7 +897,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g + if (svc_getnl(&buf->head[0]) != seq) + goto out; + /* trim off the mic and padding at the end before returning */ +- buf->len -= 4 + round_up_to_quad(mic.len); ++ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4); + stat = 0; + out: + kfree(mic.data); +@@ -925,7 +925,7 @@ static int + unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) + { + u32 priv_len, maj_stat; +- int pad, saved_len, remaining_len, offset; ++ int pad, remaining_len, offset; + + clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); + +@@ -945,12 +945,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs + buf->len -= pad; + fix_priv_head(buf, pad); + +- /* Maybe it would be better to give gss_unwrap a length parameter: */ +- saved_len = buf->len; +- buf->len = priv_len; +- maj_stat = gss_unwrap(ctx, 0, buf); ++ maj_stat = gss_unwrap(ctx, 0, priv_len, buf); + pad = priv_len - buf->len; +- 
buf->len = saved_len; + buf->len -= pad; + /* The upper layers assume the buffer is aligned on 4-byte boundaries. + * In the krb5p case, at least, the data ends up offset, so we need to +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index f7f78566be46..f1088ca39d44 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -2422,6 +2422,11 @@ rpc_check_timeout(struct rpc_task *task) + { + struct rpc_clnt *clnt = task->tk_client; + ++ if (RPC_SIGNALLED(task)) { ++ rpc_call_rpcerror(task, -ERESTARTSYS); ++ return; ++ } ++ + if (xprt_adjust_timeout(task->tk_rqstp) == 0) + return; + +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c +index f3104be8ff5d..451ca7ec321c 100644 +--- a/net/sunrpc/xdr.c ++++ b/net/sunrpc/xdr.c +@@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, + } + EXPORT_SYMBOL_GPL(xdr_buf_subsegment); + ++/** ++ * xdr_buf_trim - lop at most "len" bytes off the end of "buf" ++ * @buf: buf to be trimmed ++ * @len: number of bytes to reduce "buf" by ++ * ++ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note ++ * that it's possible that we'll trim less than that amount if the xdr_buf is ++ * too small, or if (for instance) it's all in the head and the parser has ++ * already read too far into it. ++ */ ++void xdr_buf_trim(struct xdr_buf *buf, unsigned int len) ++{ ++ size_t cur; ++ unsigned int trim = len; ++ ++ if (buf->tail[0].iov_len) { ++ cur = min_t(size_t, buf->tail[0].iov_len, trim); ++ buf->tail[0].iov_len -= cur; ++ trim -= cur; ++ if (!trim) ++ goto fix_len; ++ } ++ ++ if (buf->page_len) { ++ cur = min_t(unsigned int, buf->page_len, trim); ++ buf->page_len -= cur; ++ trim -= cur; ++ if (!trim) ++ goto fix_len; ++ } ++ ++ if (buf->head[0].iov_len) { ++ cur = min_t(size_t, buf->head[0].iov_len, trim); ++ buf->head[0].iov_len -= cur; ++ trim -= cur; ++ } ++fix_len: ++ buf->len -= (len - trim); ++} ++EXPORT_SYMBOL_GPL(xdr_buf_trim); ++ + static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) + { + unsigned int this_len; +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index 8a12a7538d63..94db4683cfaf 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -97,6 +97,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work) + runtime->event(runtime->substream); + } + ++/* buffer refcount management: call with runtime->lock held */ ++static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime) ++{ ++ runtime->buffer_ref++; ++} ++ ++static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime) ++{ ++ runtime->buffer_ref--; ++} ++ + static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) + { + struct snd_rawmidi_runtime *runtime; +@@ -646,6 +657,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime, + if (!newbuf) + return -ENOMEM; + spin_lock_irq(&runtime->lock); ++ if (runtime->buffer_ref) { ++ spin_unlock_irq(&runtime->lock); ++ kvfree(newbuf); ++ return -EBUSY; ++ } + oldbuf = runtime->buffer; + runtime->buffer = newbuf; + runtime->buffer_size = params->buffer_size; +@@ -945,8 +961,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, + long result = 0, count1; + struct snd_rawmidi_runtime *runtime = substream->runtime; + unsigned long appl_ptr; ++ int err = 0; + + spin_lock_irqsave(&runtime->lock, flags); ++ snd_rawmidi_buffer_ref(runtime); + while (count > 0 && runtime->avail) { + count1 = runtime->buffer_size - 
runtime->appl_ptr; + if (count1 > count) +@@ -965,16 +983,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, + if (userbuf) { + spin_unlock_irqrestore(&runtime->lock, flags); + if (copy_to_user(userbuf + result, +- runtime->buffer + appl_ptr, count1)) { +- return result > 0 ? result : -EFAULT; +- } ++ runtime->buffer + appl_ptr, count1)) ++ err = -EFAULT; + spin_lock_irqsave(&runtime->lock, flags); ++ if (err) ++ goto out; + } + result += count1; + count -= count1; + } ++ out: ++ snd_rawmidi_buffer_unref(runtime); + spin_unlock_irqrestore(&runtime->lock, flags); +- return result; ++ return result > 0 ? result : err; + } + + long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream, +@@ -1268,6 +1289,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, + return -EAGAIN; + } + } ++ snd_rawmidi_buffer_ref(runtime); + while (count > 0 && runtime->avail > 0) { + count1 = runtime->buffer_size - runtime->appl_ptr; + if (count1 > count) +@@ -1299,6 +1321,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, + } + __end: + count1 = runtime->avail < runtime->buffer_size; ++ snd_rawmidi_buffer_unref(runtime); + spin_unlock_irqrestore(&runtime->lock, flags); + if (count1) + snd_rawmidi_output_trigger(substream, 1); +diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h +index 16c7f6605511..26e7cb555d3c 100644 +--- a/sound/firewire/amdtp-stream-trace.h ++++ b/sound/firewire/amdtp-stream-trace.h +@@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet, + __entry->irq, + __entry->index, + __print_array(__get_dynamic_array(cip_header), +- __get_dynamic_array_len(cip_header), +- sizeof(u8))) ++ __get_dynamic_array_len(cip_header), 1)) + ); + + #endif +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 663168ddce72..d48263d1f6a2 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -2234,7 +2234,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec) + + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); ++ struct hdmi_eld *pin_eld = &per_pin->sink_eld; + ++ pin_eld->eld_valid = false; + hdmi_present_sense(per_pin, 0); + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 64270983ab7d..004d2f638cf2 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5743,6 +5743,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, + } + } + ++static void alc225_fixup_s3_pop_noise(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ if (action != HDA_FIXUP_ACT_PRE_PROBE) ++ return; ++ ++ codec->power_save_node = 1; ++} ++ + /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */ + static void alc274_fixup_bind_dacs(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +@@ -5847,6 +5856,7 @@ enum { + ALC269_FIXUP_HP_LINE1_MIC1_LED, + ALC269_FIXUP_INV_DMIC, + ALC269_FIXUP_LENOVO_DOCK, ++ ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, + ALC269_FIXUP_NO_SHUTUP, + ALC286_FIXUP_SONY_MIC_NO_PRESENCE, + ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, +@@ -5932,6 +5942,7 @@ enum { + ALC233_FIXUP_ACER_HEADSET_MIC, + ALC294_FIXUP_LENOVO_MIC_LOCATION, + ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, ++ ALC225_FIXUP_S3_POP_NOISE, + ALC700_FIXUP_INTEL_REFERENCE, + ALC274_FIXUP_DELL_BIND_DACS, + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, +@@ -5967,6 +5978,7 @@ enum { + 
ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_HEADSET_JACK, + ALC294_FIXUP_ASUS_HPE, ++ ALC294_FIXUP_ASUS_COEF_1B, + ALC285_FIXUP_HP_GPIO_LED, + }; + +@@ -6165,6 +6177,12 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT + }, ++ [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc269_fixup_limit_int_mic_boost, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_LENOVO_DOCK, ++ }, + [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_pincfg_no_hp_to_lineout, +@@ -6817,6 +6835,12 @@ static const struct hda_fixup alc269_fixups[] = { + { } + }, + .chained = true, ++ .chain_id = ALC225_FIXUP_S3_POP_NOISE ++ }, ++ [ALC225_FIXUP_S3_POP_NOISE] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc225_fixup_s3_pop_noise, ++ .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + }, + [ALC700_FIXUP_INTEL_REFERENCE] = { +@@ -7089,6 +7113,17 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, ++ [ALC294_FIXUP_ASUS_COEF_1B] = { ++ .type = HDA_FIXUP_VERBS, ++ .v.verbs = (const struct hda_verb[]) { ++ /* Set bit 10 to correct noisy output after reboot from ++ * Windows 10 (due to pop noise reduction?) ++ */ ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b }, ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b }, ++ { } ++ }, ++ }, + [ALC285_FIXUP_HP_GPIO_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_led, +@@ -7260,6 +7295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), ++ SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -7301,7 +7337,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), +- SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), ++ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), +@@ -7440,6 +7476,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"}, + {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, + {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, ++ {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"}, + {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, + {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, + {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, +@@ -8113,8 +8150,6 @@ static int patch_alc269(struct hda_codec *codec) + spec->gen.mixer_nid = 0; + break; + case 
0x10ec0225: +- codec->power_save_node = 1; +- /* fall through */ + case 0x10ec0295: + case 0x10ec0299: + spec->codec_variant = ALC269_TYPE_ALC225; +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 5a81c444a18b..092720ce2c55 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1592,13 +1592,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + msleep(20); + +- /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here, +- * otherwise requests like get/set frequency return as failed despite +- * actually succeeding. ++ /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny ++ * delay here, otherwise requests like get/set frequency return as ++ * failed despite actually succeeding. + */ + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || + chip->usb_id == USB_ID(0x046d, 0x0a46) || +- chip->usb_id == USB_ID(0x0b0e, 0x0349)) && ++ chip->usb_id == USB_ID(0x0b0e, 0x0349) || ++ chip->usb_id == USB_ID(0x0951, 0x16ad)) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + usleep_range(1000, 2000); + } +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index b6403712c2f4..281cc65276e0 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -5905,62 +5905,104 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) + } + } + +-int libbpf_num_possible_cpus(void) ++int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) + { +- static const char *fcpu = "/sys/devices/system/cpu/possible"; +- int len = 0, n = 0, il = 0, ir = 0; +- unsigned int start = 0, end = 0; +- int tmp_cpus = 0; +- static int cpus; +- char buf[128]; +- int error = 0; +- int fd = -1; ++ int err = 0, n, len, start, end = -1; ++ bool *tmp; + +- tmp_cpus = READ_ONCE(cpus); +- if (tmp_cpus > 0) +- return tmp_cpus; ++ *mask = NULL; ++ *mask_sz = 0; ++ ++ /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ ++ while (*s) { ++ if (*s == ',' || *s == '\n') { ++ s++; ++ continue; ++ } ++ n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); ++ if (n <= 0 || n > 2) { ++ pr_warning("Failed to get CPU range %s: %d\n", s, n); ++ err = -EINVAL; ++ goto cleanup; ++ } else if (n == 1) { ++ end = start; ++ } ++ if (start < 0 || start > end) { ++ pr_warning("Invalid CPU range [%d,%d] in %s\n", ++ start, end, s); ++ err = -EINVAL; ++ goto cleanup; ++ } ++ tmp = realloc(*mask, end + 1); ++ if (!tmp) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ *mask = tmp; ++ memset(tmp + *mask_sz, 0, start - *mask_sz); ++ memset(tmp + start, 1, end - start + 1); ++ *mask_sz = end + 1; ++ s += len; ++ } ++ if (!*mask_sz) { ++ pr_warning("Empty CPU range\n"); ++ return -EINVAL; ++ } ++ return 0; ++cleanup: ++ free(*mask); ++ *mask = NULL; ++ return err; ++} ++ ++int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) ++{ ++ int fd, err = 0, len; ++ char buf[128]; + + fd = open(fcpu, O_RDONLY); + if (fd < 0) { +- error = errno; +- pr_warning("Failed to open file %s: %s\n", +- fcpu, strerror(error)); +- return -error; ++ err = -errno; ++ pr_warning("Failed to open cpu mask file %s: %d\n", fcpu, err); ++ return err; + } + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len <= 0) { +- error = len ? errno : EINVAL; +- pr_warning("Failed to read # of possible cpus from %s: %s\n", +- fcpu, strerror(error)); +- return -error; ++ err = len ? 
-errno : -EINVAL; ++ pr_warning("Failed to read cpu mask from %s: %d\n", fcpu, err); ++ return err; + } +- if (len == sizeof(buf)) { +- pr_warning("File %s size overflow\n", fcpu); +- return -EOVERFLOW; ++ if (len >= sizeof(buf)) { ++ pr_warning("CPU mask is too big in file %s\n", fcpu); ++ return -E2BIG; + } + buf[len] = '\0'; + +- for (ir = 0, tmp_cpus = 0; ir <= len; ir++) { +- /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ +- if (buf[ir] == ',' || buf[ir] == '\0') { +- buf[ir] = '\0'; +- n = sscanf(&buf[il], "%u-%u", &start, &end); +- if (n <= 0) { +- pr_warning("Failed to get # CPUs from %s\n", +- &buf[il]); +- return -EINVAL; +- } else if (n == 1) { +- end = start; +- } +- tmp_cpus += end - start + 1; +- il = ir + 1; +- } +- } +- if (tmp_cpus <= 0) { +- pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu); +- return -EINVAL; ++ return parse_cpu_mask_str(buf, mask, mask_sz); ++} ++ ++int libbpf_num_possible_cpus(void) ++{ ++ static const char *fcpu = "/sys/devices/system/cpu/possible"; ++ static int cpus; ++ int err, n, i, tmp_cpus; ++ bool *mask; ++ ++ tmp_cpus = READ_ONCE(cpus); ++ if (tmp_cpus > 0) ++ return tmp_cpus; ++ ++ err = parse_cpu_mask_file(fcpu, &mask, &n); ++ if (err) ++ return err; ++ ++ tmp_cpus = 0; ++ for (i = 0; i < n; i++) { ++ if (mask[i]) ++ tmp_cpus++; + } ++ free(mask); + + WRITE_ONCE(cpus, tmp_cpus); + return tmp_cpus; +diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h +index 98216a69c32f..92940ae26ada 100644 +--- a/tools/lib/bpf/libbpf_internal.h ++++ b/tools/lib/bpf/libbpf_internal.h +@@ -63,6 +63,8 @@ do { \ + #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) + #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) + ++int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); ++int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); + int libbpf__load_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len); + +diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +index 1735faf17536..437cb93e72ac 100644 +--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c ++++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +@@ -52,7 +52,7 @@ retry: + if (pmu_fd < 0 && errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); +- goto cleanup; ++ goto close_prog; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) +diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c +index 8941a41c2a55..cce6d605c017 100644 +--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c ++++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c +@@ -1,7 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 + + #include +-#include ++#include "bpf_helpers.h" + + #define MAX_STACK_RAWTP 10 + +diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c +index 079d0f5a2909..7e4c91f2238d 100644 +--- a/tools/testing/selftests/bpf/test_select_reuseport.c ++++ b/tools/testing/selftests/bpf/test_select_reuseport.c +@@ -668,12 +668,12 @@ static void cleanup_per_test(void) + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY); +- RET_IF(err, "reset elem in result_map", ++ CHECK(err, "reset 
elem in result_map", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY); +- RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n", ++ CHECK(err, "reset line number in linum_map", "err:%d errno:%d\n", + err, errno); + + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) +diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest +index 063ecb290a5a..144308a757b7 100755 +--- a/tools/testing/selftests/ftrace/ftracetest ++++ b/tools/testing/selftests/ftrace/ftracetest +@@ -29,8 +29,25 @@ err_ret=1 + # kselftest skip code is 4 + err_skip=4 + ++# cgroup RT scheduling prevents chrt commands from succeeding, which ++# induces failures in test wakeup tests. Disable for the duration of ++# the tests. ++ ++readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us ++ ++sched_rt_runtime_orig=$(cat $sched_rt_runtime) ++ ++setup() { ++ echo -1 > $sched_rt_runtime ++} ++ ++cleanup() { ++ echo $sched_rt_runtime_orig > $sched_rt_runtime ++} ++ + errexit() { # message + echo "Error: $1" 1>&2 ++ cleanup + exit $err_ret + } + +@@ -39,6 +56,8 @@ if [ `id -u` -ne 0 ]; then + errexit "this must be run by root user" + fi + ++setup ++ + # Utilities + absdir() { # file_path + (cd `dirname $1`; pwd) +@@ -235,6 +254,7 @@ TOTAL_RESULT=0 + + INSTANCE= + CASENO=0 ++ + testcase() { # testfile + CASENO=$((CASENO+1)) + desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` +@@ -406,5 +426,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w` + prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w` + prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w` + ++cleanup ++ + # if no error, return 0 + exit $TOTAL_RESULT +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc +index 1bcb67dcae26..81490ecaaa92 100644 +--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc +@@ -38,7 +38,7 @@ for width in 64 32 16 8; do + echo 0 > events/kprobes/testprobe/enable + + : "Confirm the arguments is recorded in given types correctly" +- ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` ++ ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` + check_types $ARGS $width + + : "Clear event for next loop" +diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c +index 5945f062d749..d63881f60e1a 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c ++++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c +@@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, + vgic_mmio_read_active, vgic_mmio_write_sactive, +- NULL, vgic_mmio_uaccess_write_sactive, 1, ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, + vgic_mmio_read_active, vgic_mmio_write_cactive, +- NULL, vgic_mmio_uaccess_write_cactive, 1, ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, + vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL, +diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c +index 7dfd15dbb308..4c5909e38f78 100644 +--- 
a/virt/kvm/arm/vgic/vgic-mmio-v3.c ++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c +@@ -491,11 +491,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER, + vgic_mmio_read_active, vgic_mmio_write_sactive, +- NULL, vgic_mmio_uaccess_write_sactive, 1, ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER, + vgic_mmio_read_active, vgic_mmio_write_cactive, +- NULL, vgic_mmio_uaccess_write_cactive, ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, + 1, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR, + vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL, +@@ -563,12 +563,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0, + vgic_mmio_read_active, vgic_mmio_write_sactive, +- NULL, vgic_mmio_uaccess_write_sactive, +- 4, VGIC_ACCESS_32bit), ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4, ++ VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0, + vgic_mmio_read_active, vgic_mmio_write_cactive, +- NULL, vgic_mmio_uaccess_write_cactive, +- 4, VGIC_ACCESS_32bit), ++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4, ++ VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0, + vgic_mmio_read_priority, vgic_mmio_write_priority, 32, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c +index 7eacf00e5abe..fb1dcd397b93 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -300,8 +300,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, + } + } + +-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, +- gpa_t addr, unsigned int len) ++ ++/* ++ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ ++ * is not queued on some running VCPU's LRs, because then the change to the ++ * active state can be overwritten when the VCPU's state is synced coming back ++ * from the guest. ++ * ++ * For shared interrupts as well as GICv3 private interrupts, we have to ++ * stop all the VCPUs because interrupts can be migrated while we don't hold ++ * the IRQ locks and we don't want to be chasing moving targets. ++ * ++ * For GICv2 private interrupts we don't have to do anything because ++ * userspace accesses to the VGIC state already require all VCPUs to be ++ * stopped, and only the VCPU itself can modify its private interrupts ++ * active state, which guarantees that the VCPU is not running. 
++ */ ++static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid) ++{ ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || ++ intid >= VGIC_NR_PRIVATE_IRQS) ++ kvm_arm_halt_guest(vcpu->kvm); ++} ++ ++/* See vgic_access_active_prepare */ ++static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid) ++{ ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || ++ intid >= VGIC_NR_PRIVATE_IRQS) ++ kvm_arm_resume_guest(vcpu->kvm); ++} ++ ++static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu, ++ gpa_t addr, unsigned int len) + { + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 value = 0; +@@ -311,6 +342,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + ++ /* ++ * Even for HW interrupts, don't evaluate the HW state as ++ * all the guest is interested in is the virtual state. ++ */ + if (irq->active) + value |= (1U << i); + +@@ -320,6 +355,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + return value; + } + ++unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, ++ gpa_t addr, unsigned int len) ++{ ++ u32 intid = VGIC_ADDR_TO_INTID(addr, 1); ++ u32 val; ++ ++ mutex_lock(&vcpu->kvm->lock); ++ vgic_access_active_prepare(vcpu, intid); ++ ++ val = __vgic_mmio_read_active(vcpu, addr, len); ++ ++ vgic_access_active_finish(vcpu, intid); ++ mutex_unlock(&vcpu->kvm->lock); ++ ++ return val; ++} ++ ++unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu, ++ gpa_t addr, unsigned int len) ++{ ++ return __vgic_mmio_read_active(vcpu, addr, len); ++} ++ + /* Must be called with irq->irq_lock held */ + static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, + bool active, bool is_uaccess) +@@ -371,36 +429,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + } + +-/* +- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ +- * is not queued on some running VCPU's LRs, because then the change to the +- * active state can be overwritten when the VCPU's state is synced coming back +- * from the guest. +- * +- * For shared interrupts, we have to stop all the VCPUs because interrupts can +- * be migrated while we don't hold the IRQ locks and we don't want to be +- * chasing moving targets. +- * +- * For private interrupts we don't have to do anything because userspace +- * accesses to the VGIC state already require all VCPUs to be stopped, and +- * only the VCPU itself can modify its private interrupts active state, which +- * guarantees that the VCPU is not running. 
+- */ +-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) +-{ +- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid >= VGIC_NR_PRIVATE_IRQS) +- kvm_arm_halt_guest(vcpu->kvm); +-} +- +-/* See vgic_change_active_prepare */ +-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) +-{ +- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid >= VGIC_NR_PRIVATE_IRQS) +- kvm_arm_resume_guest(vcpu->kvm); +-} +- + static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +@@ -422,11 +450,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + + mutex_lock(&vcpu->kvm->lock); +- vgic_change_active_prepare(vcpu, intid); ++ vgic_access_active_prepare(vcpu, intid); + + __vgic_mmio_write_cactive(vcpu, addr, len, val); + +- vgic_change_active_finish(vcpu, intid); ++ vgic_access_active_finish(vcpu, intid); + mutex_unlock(&vcpu->kvm->lock); + } + +@@ -459,11 +487,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + + mutex_lock(&vcpu->kvm->lock); +- vgic_change_active_prepare(vcpu, intid); ++ vgic_access_active_prepare(vcpu, intid); + + __vgic_mmio_write_sactive(vcpu, addr, len, val); + +- vgic_change_active_finish(vcpu, intid); ++ vgic_access_active_finish(vcpu, intid); + mutex_unlock(&vcpu->kvm->lock); + } + +diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h +index 836f418f1ee8..b6aff5252429 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio.h ++++ b/virt/kvm/arm/vgic/vgic-mmio.h +@@ -157,6 +157,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, + unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + ++unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu, ++ gpa_t addr, unsigned int len); ++ + void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); diff --git a/patch/kernel/odroidxu4-current/patch-5.4.42-43.patch b/patch/kernel/odroidxu4-current/patch-5.4.42-43.patch new file mode 100644 index 000000000..d1cb39829 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.42-43.patch @@ -0,0 +1,4885 @@ +diff --git a/Makefile b/Makefile +index 1bd1b17cd207..7d7cf0082443 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 42 ++SUBLEVEL = 43 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -1246,11 +1246,15 @@ ifneq ($(dtstree),) + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ + + PHONY += dtbs dtbs_install dtbs_check +-dtbs dtbs_check: include/config/kernel.release scripts_dtc ++dtbs: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) + ++ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),) ++dtbs: dt_binding_check ++endif ++ + dtbs_check: export CHECK_DTBS=1 +-dtbs_check: dt_binding_check ++dtbs_check: dtbs + + dtbs_install: + $(Q)$(MAKE) $(dtbinst)=$(dtstree) +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h +index 83c391b597d4..fdc4ae3e7378 100644 +--- a/arch/arm/include/asm/futex.h ++++ b/arch/arm/include/asm/futex.h +@@ -164,8 +164,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + preempt_enable(); + #endif + +- if (!ret) +- *oval = oldval; ++ /* ++ * Store unconditionally. 
If ret != 0 the extra store is the least ++ * of the worries but GCC cannot figure out that __futex_atomic_op() ++ * is either setting ret to -EFAULT or storing the old value in ++ * oldval which results in a uninitialized warning at the call site. ++ */ ++ *oval = oldval; + + return ret; + } +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c +index 9168c4f1a37f..8d2d9d5b418f 100644 +--- a/arch/arm64/kernel/ptrace.c ++++ b/arch/arm64/kernel/ptrace.c +@@ -1829,10 +1829,11 @@ static void tracehook_report_syscall(struct pt_regs *regs, + + int syscall_trace_enter(struct pt_regs *regs) + { +- if (test_thread_flag(TIF_SYSCALL_TRACE) || +- test_thread_flag(TIF_SYSCALL_EMU)) { ++ unsigned long flags = READ_ONCE(current_thread_info()->flags); ++ ++ if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { + tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); +- if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU)) ++ if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU)) + return -1; + } + +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index 2b1033f13210..3dc5aecdd853 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -133,7 +133,7 @@ config PPC + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 +- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) ++ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_UACCESS_MCSAFE if PPC64 +diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h +index cd060b5dd8fd..e4dc64cc9c55 100644 +--- a/arch/s390/include/asm/pci_io.h ++++ b/arch/s390/include/asm/pci_io.h +@@ -8,6 +8,10 @@ + #include + #include + ++/* I/O size constraints */ ++#define ZPCI_MAX_READ_SIZE 8 ++#define ZPCI_MAX_WRITE_SIZE 128 ++ + /* I/O Map */ + #define ZPCI_IOMAP_SHIFT 48 + #define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL +@@ -140,7 +144,8 @@ static inline int zpci_memcpy_fromio(void *dst, + + while (n > 0) { + size = zpci_get_max_write_size((u64 __force) src, +- (u64) dst, n, 8); ++ (u64) dst, n, ++ ZPCI_MAX_READ_SIZE); + rc = zpci_read_single(dst, src, size); + if (rc) + break; +@@ -161,7 +166,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst, + + while (n > 0) { + size = zpci_get_max_write_size((u64 __force) dst, +- (u64) src, n, 128); ++ (u64) src, n, ++ ZPCI_MAX_WRITE_SIZE); + if (size > 8) /* main path */ + rc = zpci_write_block(dst, src, size); + else +diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c +index 8415ae7d2a23..f9e4baa64b67 100644 +--- a/arch/s390/kernel/machine_kexec_file.c ++++ b/arch/s390/kernel/machine_kexec_file.c +@@ -151,7 +151,7 @@ static int kexec_file_add_initrd(struct kimage *image, + buf.mem += crashk_res.start; + buf.memsz = buf.bufsz; + +- data->parm->initrd_start = buf.mem; ++ data->parm->initrd_start = data->memsz; + data->parm->initrd_size = buf.memsz; + data->memsz += buf.memsz; + +diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c +index d5035de9020e..b7182cec48dc 100644 +--- a/arch/s390/kernel/machine_kexec_reloc.c ++++ b/arch/s390/kernel/machine_kexec_reloc.c +@@ -28,6 +28,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, + break; + case R_390_64: /* Direct 64 bit. 
*/ + case R_390_GLOB_DAT: ++ case R_390_JMP_SLOT: + *(u64 *)loc = val; + break; + case R_390_PC16: /* PC relative 16 bit. */ +diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c +index 7d42a8794f10..020a2c514d96 100644 +--- a/arch/s390/pci/pci_mmio.c ++++ b/arch/s390/pci/pci_mmio.c +@@ -11,6 +11,113 @@ + #include + #include + #include ++#include ++#include ++ ++static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset) ++{ ++ struct { ++ u64 offset; ++ u8 cc; ++ u8 status; ++ } data = {offset, cc, status}; ++ ++ zpci_err_hex(&data, sizeof(data)); ++} ++ ++static inline int __pcistb_mio_inuser( ++ void __iomem *ioaddr, const void __user *src, ++ u64 len, u8 *status) ++{ ++ int cc = -ENXIO; ++ ++ asm volatile ( ++ " sacf 256\n" ++ "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" ++ "1: ipm %[cc]\n" ++ " srl %[cc],28\n" ++ "2: sacf 768\n" ++ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) ++ : [cc] "+d" (cc), [len] "+d" (len) ++ : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src)) ++ : "cc", "memory"); ++ *status = len >> 24 & 0xff; ++ return cc; ++} ++ ++static inline int __pcistg_mio_inuser( ++ void __iomem *ioaddr, const void __user *src, ++ u64 ulen, u8 *status) ++{ ++ register u64 addr asm("2") = (u64 __force) ioaddr; ++ register u64 len asm("3") = ulen; ++ int cc = -ENXIO; ++ u64 val = 0; ++ u64 cnt = ulen; ++ u8 tmp; ++ ++ /* ++ * copy 0 < @len <= 8 bytes from @src into the right most bytes of ++ * a register, then store it to PCI at @ioaddr while in secondary ++ * address space. pcistg then uses the user mappings. ++ */ ++ asm volatile ( ++ " sacf 256\n" ++ "0: llgc %[tmp],0(%[src])\n" ++ " sllg %[val],%[val],8\n" ++ " aghi %[src],1\n" ++ " ogr %[val],%[tmp]\n" ++ " brctg %[cnt],0b\n" ++ "1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n" ++ "2: ipm %[cc]\n" ++ " srl %[cc],28\n" ++ "3: sacf 768\n" ++ EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) ++ : ++ [src] "+a" (src), [cnt] "+d" (cnt), ++ [val] "+d" (val), [tmp] "=d" (tmp), ++ [len] "+d" (len), [cc] "+d" (cc), ++ [ioaddr] "+a" (addr) ++ :: "cc", "memory"); ++ *status = len >> 24 & 0xff; ++ ++ /* did we read everything from user memory? */ ++ if (!cc && cnt != 0) ++ cc = -EFAULT; ++ ++ return cc; ++} ++ ++static inline int __memcpy_toio_inuser(void __iomem *dst, ++ const void __user *src, size_t n) ++{ ++ int size, rc = 0; ++ u8 status = 0; ++ mm_segment_t old_fs; ++ ++ if (!src) ++ return -EINVAL; ++ ++ old_fs = enable_sacf_uaccess(); ++ while (n > 0) { ++ size = zpci_get_max_write_size((u64 __force) dst, ++ (u64 __force) src, n, ++ ZPCI_MAX_WRITE_SIZE); ++ if (size > 8) /* main path */ ++ rc = __pcistb_mio_inuser(dst, src, size, &status); ++ else ++ rc = __pcistg_mio_inuser(dst, src, size, &status); ++ if (rc) ++ break; ++ src += size; ++ dst += size; ++ n -= size; ++ } ++ disable_sacf_uaccess(old_fs); ++ if (rc) ++ zpci_err_mmio(rc, status, (__force u64) dst); ++ return rc; ++} + + static long get_pfn(unsigned long user_addr, unsigned long access, + unsigned long *pfn) +@@ -46,6 +153,20 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, + + if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) + return -EINVAL; ++ ++ /* ++ * Only support read access to MIO capable devices on a MIO enabled ++ * system. Otherwise we would have to check for every address if it is ++ * a special ZPCI_ADDR and we would have to do a get_pfn() which we ++ * don't need for MIO capable devices. 
++ */ ++ if (static_branch_likely(&have_mio)) { ++ ret = __memcpy_toio_inuser((void __iomem *) mmio_addr, ++ user_buffer, ++ length); ++ return ret; ++ } ++ + if (length > 64) { + buf = kmalloc(length, GFP_KERNEL); + if (!buf) +@@ -56,7 +177,8 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, + ret = get_pfn(mmio_addr, VM_WRITE, &pfn); + if (ret) + goto out; +- io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); ++ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | ++ (mmio_addr & ~PAGE_MASK)); + + ret = -EFAULT; + if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) +@@ -72,6 +194,78 @@ out: + return ret; + } + ++static inline int __pcilg_mio_inuser( ++ void __user *dst, const void __iomem *ioaddr, ++ u64 ulen, u8 *status) ++{ ++ register u64 addr asm("2") = (u64 __force) ioaddr; ++ register u64 len asm("3") = ulen; ++ u64 cnt = ulen; ++ int shift = ulen * 8; ++ int cc = -ENXIO; ++ u64 val, tmp; ++ ++ /* ++ * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in ++ * user space) into a register using pcilg then store these bytes at ++ * user address @dst ++ */ ++ asm volatile ( ++ " sacf 256\n" ++ "0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n" ++ "1: ipm %[cc]\n" ++ " srl %[cc],28\n" ++ " ltr %[cc],%[cc]\n" ++ " jne 4f\n" ++ "2: ahi %[shift],-8\n" ++ " srlg %[tmp],%[val],0(%[shift])\n" ++ "3: stc %[tmp],0(%[dst])\n" ++ " aghi %[dst],1\n" ++ " brctg %[cnt],2b\n" ++ "4: sacf 768\n" ++ EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) ++ : ++ [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len), ++ [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), ++ [shift] "+d" (shift) ++ : ++ [ioaddr] "a" (addr) ++ : "cc", "memory"); ++ ++ /* did we write everything to the user space buffer? */ ++ if (!cc && cnt != 0) ++ cc = -EFAULT; ++ ++ *status = len >> 24 & 0xff; ++ return cc; ++} ++ ++static inline int __memcpy_fromio_inuser(void __user *dst, ++ const void __iomem *src, ++ unsigned long n) ++{ ++ int size, rc = 0; ++ u8 status; ++ mm_segment_t old_fs; ++ ++ old_fs = enable_sacf_uaccess(); ++ while (n > 0) { ++ size = zpci_get_max_write_size((u64 __force) src, ++ (u64 __force) dst, n, ++ ZPCI_MAX_READ_SIZE); ++ rc = __pcilg_mio_inuser(dst, src, size, &status); ++ if (rc) ++ break; ++ src += size; ++ dst += size; ++ n -= size; ++ } ++ disable_sacf_uaccess(old_fs); ++ if (rc) ++ zpci_err_mmio(rc, status, (__force u64) dst); ++ return rc; ++} ++ + SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, + void __user *, user_buffer, size_t, length) + { +@@ -86,12 +280,27 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, + + if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) + return -EINVAL; ++ ++ /* ++ * Only support write access to MIO capable devices on a MIO enabled ++ * system. Otherwise we would have to check for every address if it is ++ * a special ZPCI_ADDR and we would have to do a get_pfn() which we ++ * don't need for MIO capable devices. 
++ */ ++ if (static_branch_likely(&have_mio)) { ++ ret = __memcpy_fromio_inuser( ++ user_buffer, (const void __iomem *)mmio_addr, ++ length); ++ return ret; ++ } ++ + if (length > 64) { + buf = kmalloc(length, GFP_KERNEL); + if (!buf) + return -ENOMEM; +- } else ++ } else { + buf = local_buf; ++ } + + ret = get_pfn(mmio_addr, VM_READ, &pfn); + if (ret) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index f5341edbfa16..7d91a3f5b26a 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -550,6 +550,7 @@ struct kvm_vcpu_arch { + unsigned long cr4; + unsigned long cr4_guest_owned_bits; + unsigned long cr8; ++ u32 host_pkru; + u32 pkru; + u32 hflags; + u64 efer; +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index df891f874614..25b8c45467fc 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -352,8 +352,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) + * According to Intel, MFENCE can do the serialization here. + */ + asm volatile("mfence" : : : "memory"); +- +- printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); + return; + } + +@@ -552,7 +550,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + #define DEADLINE_MODEL_MATCH_REV(model, rev) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev } + +-static u32 hsx_deadline_rev(void) ++static __init u32 hsx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x02: return 0x3a; /* EP */ +@@ -562,7 +560,7 @@ static u32 hsx_deadline_rev(void) + return ~0U; + } + +-static u32 bdx_deadline_rev(void) ++static __init u32 bdx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x02: return 0x00000011; +@@ -574,7 +572,7 @@ static u32 bdx_deadline_rev(void) + return ~0U; + } + +-static u32 skx_deadline_rev(void) ++static __init u32 skx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x03: return 0x01000136; +@@ -587,7 +585,7 @@ static u32 skx_deadline_rev(void) + return ~0U; + } + +-static const struct x86_cpu_id deadline_match[] = { ++static const struct x86_cpu_id deadline_match[] __initconst = { + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D, bdx_deadline_rev), +@@ -609,18 +607,19 @@ static const struct x86_cpu_id deadline_match[] = { + {}, + }; + +-static void apic_check_deadline_errata(void) ++static __init bool apic_validate_deadline_timer(void) + { + const struct x86_cpu_id *m; + u32 rev; + +- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || +- boot_cpu_has(X86_FEATURE_HYPERVISOR)) +- return; ++ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) ++ return false; ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ return true; + + m = x86_match_cpu(deadline_match); + if (!m) +- return; ++ return true; + + /* + * Function pointers will have the MSB set due to address layout, +@@ -632,11 +631,12 @@ static void apic_check_deadline_errata(void) + rev = (u32)m->driver_data; + + if (boot_cpu_data.microcode >= rev) +- return; ++ return true; + + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); + pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; " + "please update microcode to version: 0x%x (or later)\n", rev); ++ return false; + } + + /* +@@ -2098,7 +2098,8 @@ void __init init_apic_mappings(void) + { + unsigned int new_apicid; + +- apic_check_deadline_errata(); ++ 
if (apic_validate_deadline_timer()) ++ pr_debug("TSC deadline timer available\n"); + + if (x2apic_mode) { + boot_cpu_physical_apicid = read_apic_id(); +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index 647e6af0883d..aa0f39dc8129 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -311,12 +311,19 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address); + + unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) + { ++ struct task_struct *task = state->task; ++ + if (unwind_done(state)) + return NULL; + + if (state->regs) + return &state->regs->ip; + ++ if (task != current && state->sp == task->thread.sp) { ++ struct inactive_task_frame *frame = (void *)task->thread.sp; ++ return &frame->ret_addr; ++ } ++ + if (state->sp) + return (unsigned long *)state->sp - 1; + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index fda2126f9a97..cc7da664fd39 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -998,33 +998,32 @@ static void svm_cpu_uninit(int cpu) + static int svm_cpu_init(int cpu) + { + struct svm_cpu_data *sd; +- int r; + + sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); + if (!sd) + return -ENOMEM; + sd->cpu = cpu; +- r = -ENOMEM; + sd->save_area = alloc_page(GFP_KERNEL); + if (!sd->save_area) +- goto err_1; ++ goto free_cpu_data; + + if (svm_sev_enabled()) { +- r = -ENOMEM; + sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, + sizeof(void *), + GFP_KERNEL); + if (!sd->sev_vmcbs) +- goto err_1; ++ goto free_save_area; + } + + per_cpu(svm_data, cpu) = sd; + + return 0; + +-err_1: ++free_save_area: ++ __free_page(sd->save_area); ++free_cpu_data: + kfree(sd); +- return r; ++ return -ENOMEM; + + } + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 72f51275247e..7a2c05277f4c 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -1360,7 +1360,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + + vmx_vcpu_pi_load(vcpu, cpu); + +- vmx->host_pkru = read_pkru(); + vmx->host_debugctlmsr = get_debugctlmsr(); + } + +@@ -6521,11 +6520,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) + + kvm_load_guest_xcr0(vcpu); + +- if (static_cpu_has(X86_FEATURE_PKU) && +- kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && +- vcpu->arch.pkru != vmx->host_pkru) +- __write_pkru(vcpu->arch.pkru); +- + pt_guest_enter(vmx); + + atomic_switch_perf_msrs(vmx); +@@ -6614,18 +6608,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) + + pt_guest_exit(vmx); + +- /* +- * eager fpu is enabled if PKEY is supported and CR4 is switched +- * back on host, so it is safe to read guest PKRU from current +- * XSAVE. 
+- */ +- if (static_cpu_has(X86_FEATURE_PKU) && +- kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { +- vcpu->arch.pkru = rdpkru(); +- if (vcpu->arch.pkru != vmx->host_pkru) +- __write_pkru(vmx->host_pkru); +- } +- + kvm_put_guest_xcr0(vcpu); + + vmx->nested.nested_run_pending = 0; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 41408065574f..c6d9e363dfc0 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -832,11 +832,25 @@ void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + vcpu->guest_xcr0_loaded = 1; + } ++ ++ if (static_cpu_has(X86_FEATURE_PKU) && ++ (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || ++ (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && ++ vcpu->arch.pkru != vcpu->arch.host_pkru) ++ __write_pkru(vcpu->arch.pkru); + } + EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); + + void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) + { ++ if (static_cpu_has(X86_FEATURE_PKU) && ++ (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || ++ (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { ++ vcpu->arch.pkru = rdpkru(); ++ if (vcpu->arch.pkru != vcpu->arch.host_pkru) ++ __write_pkru(vcpu->arch.host_pkru); ++ } ++ + if (vcpu->guest_xcr0_loaded) { + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); +@@ -8222,6 +8236,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + trace_kvm_entry(vcpu->vcpu_id); + guest_enter_irqoff(); + ++ /* Save host pkru register if supported */ ++ vcpu->arch.host_pkru = read_pkru(); ++ + fpregs_assert_state_consistent(); + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + switch_fpu_return(); +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index a19a71b4d185..281e584cfe39 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -42,7 +42,8 @@ struct cpa_data { + unsigned long pfn; + unsigned int flags; + unsigned int force_split : 1, +- force_static_prot : 1; ++ force_static_prot : 1, ++ force_flush_all : 1; + struct page **pages; + }; + +@@ -352,10 +353,10 @@ static void cpa_flush(struct cpa_data *data, int cache) + return; + } + +- if (cpa->numpages <= tlb_single_page_flush_ceiling) +- on_each_cpu(__cpa_flush_tlb, cpa, 1); +- else ++ if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling) + flush_tlb_all(); ++ else ++ on_each_cpu(__cpa_flush_tlb, cpa, 1); + + if (!cache) + return; +@@ -1584,6 +1585,8 @@ static int cpa_process_alias(struct cpa_data *cpa) + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; + ++ cpa->force_flush_all = 1; ++ + ret = __change_page_attr_set_clr(&alias_cpa, 0); + if (ret) + return ret; +@@ -1604,6 +1607,7 @@ static int cpa_process_alias(struct cpa_data *cpa) + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; + ++ cpa->force_flush_all = 1; + /* + * The high mapping range is imprecise, so ignore the + * return value. +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 5b53a66d403d..57eacdcbf820 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1984,9 +1984,13 @@ bool acpi_ec_dispatch_gpe(void) + * to allow the caller to process events properly after that. + */ + ret = acpi_dispatch_gpe(NULL, first_ec->gpe); +- if (ret == ACPI_INTERRUPT_HANDLED) ++ if (ret == ACPI_INTERRUPT_HANDLED) { + pm_pr_dbg("EC GPE dispatched\n"); + ++ /* Flush the event and query workqueues. 
*/ ++ acpi_ec_flush_work(); ++ } ++ + return false; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index 85514c0f3aa5..d1b74179d217 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -977,13 +977,6 @@ static int acpi_s2idle_prepare_late(void) + return 0; + } + +-static void acpi_s2idle_sync(void) +-{ +- /* The EC driver uses special workqueues that need to be flushed. */ +- acpi_ec_flush_work(); +- acpi_os_wait_events_complete(); /* synchronize Notify handling */ +-} +- + static bool acpi_s2idle_wake(void) + { + if (!acpi_sci_irq_valid()) +@@ -1015,7 +1008,7 @@ static bool acpi_s2idle_wake(void) + return true; + + /* +- * Cancel the wakeup and process all pending events in case ++ * Cancel the SCI wakeup and process all pending events in case + * there are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the +@@ -1023,8 +1016,7 @@ static bool acpi_s2idle_wake(void) + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); +- +- acpi_s2idle_sync(); ++ acpi_os_wait_events_complete(); + + /* + * The SCI is in the "suspended" state now and it cannot produce +@@ -1057,7 +1049,8 @@ static void acpi_s2idle_restore(void) + * of GPEs. + */ + acpi_os_wait_events_complete(); /* synchronize GPE processing */ +- acpi_s2idle_sync(); ++ acpi_ec_flush_work(); /* flush the EC driver's workqueues */ ++ acpi_os_wait_events_complete(); /* synchronize Notify handling */ + + s2idle_wakeup = false; + +diff --git a/drivers/base/component.c b/drivers/base/component.c +index 1fdbd6ff2058..b9f20ada68b0 100644 +--- a/drivers/base/component.c ++++ b/drivers/base/component.c +@@ -257,7 +257,8 @@ static int try_to_bring_up_master(struct master *master, + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); +- dev_info(master->dev, "master bind failed: %d\n", ret); ++ if (ret != -EPROBE_DEFER) ++ dev_info(master->dev, "master bind failed: %d\n", ret); + return ret; + } + +@@ -611,8 +612,9 @@ static int component_bind(struct component *component, struct master *master, + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + +- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", +- dev_name(component->dev), component->ops, ret); ++ if (ret != -EPROBE_DEFER) ++ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", ++ dev_name(component->dev), component->ops, ret); + } + + return ret; +diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c +index 3d0a7e702c94..1e678bdf5aed 100644 +--- a/drivers/dax/kmem.c ++++ b/drivers/dax/kmem.c +@@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *dev) + resource_size_t kmem_size; + resource_size_t kmem_end; + struct resource *new_res; ++ const char *new_res_name; + int numa_node; + int rc; + +@@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *dev) + kmem_size &= ~(memory_block_size_bytes() - 1); + kmem_end = kmem_start + kmem_size; + +- /* Region is permanently reserved. Hot-remove not yet implemented. */ +- new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev)); ++ new_res_name = kstrdup(dev_name(dev), GFP_KERNEL); ++ if (!new_res_name) ++ return -ENOMEM; ++ ++ /* Region is permanently reserved if hotremove fails. 
*/ ++ new_res = request_mem_region(kmem_start, kmem_size, new_res_name); + if (!new_res) { + dev_warn(dev, "could not reserve region [%pa-%pa]\n", + &kmem_start, &kmem_end); ++ kfree(new_res_name); + return -EBUSY; + } + +@@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *dev) + * unknown to us that will break add_memory() below. + */ + new_res->flags = IORESOURCE_SYSTEM_RAM; +- new_res->name = dev_name(dev); + + rc = add_memory(numa_node, new_res->start, resource_size(new_res)); + if (rc) { + release_resource(new_res); + kfree(new_res); ++ kfree(new_res_name); + return rc; + } + dev_dax->dax_kmem_res = new_res; +@@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct device *dev) + struct resource *res = dev_dax->dax_kmem_res; + resource_size_t kmem_start = res->start; + resource_size_t kmem_size = resource_size(res); ++ const char *res_name = res->name; + int rc; + + /* +@@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct device *dev) + /* Release and free dax resources */ + release_resource(res); + kfree(res); ++ kfree(res_name); + dev_dax->dax_kmem_res = NULL; + + return 0; +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index 364dd34799d4..0425984db118 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) + mutex_unlock(&info->lock); + return ret; + } else if (dmatest_run) { +- if (is_threaded_test_pending(info)) +- start_threaded_tests(info); +- else +- pr_info("Could not start test, no channels configured\n"); ++ if (!is_threaded_test_pending(info)) { ++ pr_info("No channels configured, continue with any\n"); ++ add_threaded_test(info); ++ } ++ start_threaded_tests(info); + } else { + stop_threaded_test(info); + } +diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c +index 90bbcef99ef8..af20e9a790a2 100644 +--- a/drivers/dma/owl-dma.c ++++ b/drivers/dma/owl-dma.c +@@ -175,13 +175,11 @@ struct owl_dma_txd { + * @id: physical index to this channel + * @base: virtual memory base for the dma channel + * @vchan: the virtual channel currently being served by this physical channel +- * @lock: a lock to use when altering an instance of this struct + */ + struct owl_dma_pchan { + u32 id; + void __iomem *base; + struct owl_dma_vchan *vchan; +- spinlock_t lock; + }; + + /** +@@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, + for (i = 0; i < od->nr_pchans; i++) { + pchan = &od->pchans[i]; + +- spin_lock_irqsave(&pchan->lock, flags); ++ spin_lock_irqsave(&od->lock, flags); + if (!pchan->vchan) { + pchan->vchan = vchan; +- spin_unlock_irqrestore(&pchan->lock, flags); ++ spin_unlock_irqrestore(&od->lock, flags); + break; + } + +- spin_unlock_irqrestore(&pchan->lock, flags); ++ spin_unlock_irqrestore(&od->lock, flags); + } + + return pchan; +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c +index 6e1268552f74..914901a680c8 100644 +--- a/drivers/dma/tegra210-adma.c ++++ b/drivers/dma/tegra210-adma.c +@@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev) + ret = dma_async_device_register(&tdma->dma_dev); + if (ret < 0) { + dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); +- goto irq_dispose; ++ goto rpm_put; + } + + ret = of_dma_controller_register(pdev->dev.of_node, +diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c +index eb9af83e4d59..aeeb1b2d8ede 100644 +--- a/drivers/firmware/efi/libstub/tpm.c ++++ 
b/drivers/firmware/efi/libstub/tpm.c +@@ -64,7 +64,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) + efi_status_t status; + efi_physical_addr_t log_location = 0, log_last_entry = 0; + struct linux_efi_tpm_eventlog *log_tbl = NULL; +- struct efi_tcg2_final_events_table *final_events_table; ++ struct efi_tcg2_final_events_table *final_events_table = NULL; + unsigned long first_entry_addr, last_entry_addr; + size_t log_size, last_entry_size; + efi_bool_t truncated; +@@ -140,7 +140,8 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) + * Figure out whether any events have already been logged to the + * final events structure, and if so how much space they take up + */ +- final_events_table = get_efi_config_table(sys_table_arg, ++ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) ++ final_events_table = get_efi_config_table(sys_table_arg, + LINUX_EFI_TPM_FINAL_LOG_GUID); + if (final_events_table && final_events_table->nr_events) { + struct tcg_pcr_event2_head *header; +diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c +index 55b031d2c989..c1955d320fec 100644 +--- a/drivers/firmware/efi/tpm.c ++++ b/drivers/firmware/efi/tpm.c +@@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void) + tbl_size = sizeof(*log_tbl) + log_tbl->size; + memblock_reserve(efi.tpm_log, tbl_size); + +- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) ++ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || ++ log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { ++ pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); + goto out; ++ } + + final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl)); + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 99906435dcf7..9f30343262f3 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -1422,17 +1422,22 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) + dc_sink_retain(aconnector->dc_sink); + if (sink->dc_edid.length == 0) { + aconnector->edid = NULL; +- drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); ++ if (aconnector->dc_link->aux_mode) { ++ drm_dp_cec_unset_edid( ++ &aconnector->dm_dp_aux.aux); ++ } + } else { + aconnector->edid = +- (struct edid *) sink->dc_edid.raw_edid; +- ++ (struct edid *)sink->dc_edid.raw_edid; + + drm_connector_update_edid_property(connector, +- aconnector->edid); +- drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, +- aconnector->edid); ++ aconnector->edid); ++ ++ if (aconnector->dc_link->aux_mode) ++ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, ++ aconnector->edid); + } ++ + amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + + } else { +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +index aa3e4c3b063a..1ba83a90cdef 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +@@ -240,8 +240,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) + } + + if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && +- submit->bos[i].va != mapping->iova) ++ submit->bos[i].va != mapping->iova) { ++ etnaviv_gem_mapping_unreference(mapping); + return -EINVAL; ++ } + + atomic_inc(&etnaviv_obj->gpu_active); + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +index e6795bafcbb9..75f9db8f7bec 100644 +--- 
a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +@@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, + if (!(gpu->identity.features & meta->feature)) + continue; + +- if (meta->nr_domains < (index - offset)) { ++ if (index - offset >= meta->nr_domains) { + offset += meta->nr_domains; + continue; + } +diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c +index a62bdf9be682..59aa5e64acb0 100644 +--- a/drivers/gpu/drm/i915/gvt/display.c ++++ b/drivers/gpu/drm/i915/gvt/display.c +@@ -207,14 +207,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); +- vgpu_vreg_t(vgpu, LCPLL1_CTL) |= +- LCPLL_PLL_ENABLE | +- LCPLL_PLL_LOCK; +- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; +- ++ /* ++ * Only 1 PIPE enabled in current vGPU display and PIPE_A is ++ * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, ++ * TRANSCODER_A can be enabled. PORT_x depends on the input of ++ * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x ++ * so we fixed to DPLL0 here. ++ * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode ++ */ ++ vgpu_vreg_t(vgpu, DPLL_CTRL1) = ++ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); ++ vgpu_vreg_t(vgpu, DPLL_CTRL1) |= ++ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); ++ vgpu_vreg_t(vgpu, LCPLL1_CTL) = ++ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; ++ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); ++ /* ++ * Golden M/N are calculated based on: ++ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), ++ * DP link clk 1620 MHz and non-constant_n. ++ * TODO: calculate DP link symbol clk and stream clk m/n. 
++ */ ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; ++ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; ++ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; ++ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +@@ -235,6 +262,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +@@ -255,6 +288,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c +index 0d39038898d4..49d498882cf6 100644 +--- a/drivers/gpu/drm/i915/i915_request.c ++++ b/drivers/gpu/drm/i915/i915_request.c +@@ -894,8 +894,10 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) + GEM_BUG_ON(to == from); + GEM_BUG_ON(to->timeline == from->timeline); + +- if (i915_request_completed(from)) ++ if (i915_request_completed(from)) { ++ i915_sw_fence_set_error_once(&to->submit, from->fence.error); + return 0; ++ } + + if (to->engine->schedule) { + ret = i915_sched_node_add_dependency(&to->sched, &from->sched); +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c +index fa704153cb00..b2ad319a74b9 100644 +--- a/drivers/hid/hid-alps.c ++++ b/drivers/hid/hid-alps.c +@@ -802,6 +802,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) + break; + case HID_DEVICE_ID_ALPS_U1_DUAL: + case HID_DEVICE_ID_ALPS_U1: ++ case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY: + data->dev_type = U1; + break; + default: +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 646b98809ed3..13b7222ef2c9 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -79,10 +79,10 @@ + #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F + #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 + #define HID_DEVICE_ID_ALPS_U1 0x1215 ++#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E + #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C + #define HID_DEVICE_ID_ALPS_1222 0x1222 + +- + #define USB_VENDOR_ID_AMI 0x046b + #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 + +@@ -385,6 +385,7 @@ + 
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 + + #define USB_VENDOR_ID_ELAN 0x04f3 + #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 +@@ -1088,6 +1089,9 @@ + #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 + #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 + ++#define I2C_VENDOR_ID_SYNAPTICS 0x06cb ++#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13 ++ + #define USB_VENDOR_ID_SYNAPTICS 0x06cb + #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 + #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 +@@ -1102,6 +1106,7 @@ + #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 + #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 + #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 ++#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 362805ddf377..03c720b47306 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1922,6 +1922,9 @@ static const struct hid_device_id mt_devices[] = { + { .driver_data = MT_CLS_EGALAX_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, ++ { .driver_data = MT_CLS_EGALAX, ++ MT_USB_DEVICE(USB_VENDOR_ID_DWAV, ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, + + /* Elitegroup panel */ + { .driver_data = MT_CLS_SERIAL, +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index ae64a286a68f..90ec2390ef68 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index 479934f7d241..b525b2715e07 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks { + I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, + I2C_HID_QUIRK_RESET_ON_RESUME }, ++ { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393, ++ I2C_HID_QUIRK_RESET_ON_RESUME }, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, + { 0, 0 } +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c +index 810a942eaa8e..cc193f2ba5d3 100644 +--- a/drivers/i2c/i2c-core-base.c ++++ b/drivers/i2c/i2c-core-base.c +@@ -338,8 +338,10 @@ static int i2c_device_probe(struct device *dev) + } else if (ACPI_COMPANION(dev)) { + irq = i2c_acpi_get_irq(client); + } +- if (irq == -EPROBE_DEFER) +- return irq; ++ if (irq == 
-EPROBE_DEFER) { ++ status = irq; ++ goto put_sync_adapter; ++ } + + if (irq < 0) + irq = 0; +@@ -353,15 +355,19 @@ static int i2c_device_probe(struct device *dev) + */ + if (!driver->id_table && + !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && +- !i2c_of_match_device(dev->driver->of_match_table, client)) +- return -ENODEV; ++ !i2c_of_match_device(dev->driver->of_match_table, client)) { ++ status = -ENODEV; ++ goto put_sync_adapter; ++ } + + if (client->flags & I2C_CLIENT_WAKE) { + int wakeirq; + + wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); +- if (wakeirq == -EPROBE_DEFER) +- return wakeirq; ++ if (wakeirq == -EPROBE_DEFER) { ++ status = wakeirq; ++ goto put_sync_adapter; ++ } + + device_init_wakeup(&client->dev, true); + +@@ -408,6 +414,10 @@ err_detach_pm_domain: + err_clear_wakeup_irq: + dev_pm_clear_wake_irq(&client->dev); + device_init_wakeup(&client->dev, false); ++put_sync_adapter: ++ if (client->flags & I2C_CLIENT_HOST_NOTIFY) ++ pm_runtime_put_sync(&client->adapter->dev); ++ + return status; + } + +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 2ea4585d18c5..94beacc41302 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -40,7 +40,7 @@ + struct i2c_dev { + struct list_head list; + struct i2c_adapter *adap; +- struct device *dev; ++ struct device dev; + struct cdev cdev; + }; + +@@ -84,12 +84,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) + return i2c_dev; + } + +-static void put_i2c_dev(struct i2c_dev *i2c_dev) ++static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) + { + spin_lock(&i2c_dev_list_lock); + list_del(&i2c_dev->list); + spin_unlock(&i2c_dev_list_lock); +- kfree(i2c_dev); ++ if (del_cdev) ++ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); ++ put_device(&i2c_dev->dev); + } + + static ssize_t name_show(struct device *dev, +@@ -628,6 +630,14 @@ static const struct file_operations i2cdev_fops = { + + static struct class *i2c_dev_class; + ++static void i2cdev_dev_release(struct device *dev) ++{ ++ struct i2c_dev *i2c_dev; ++ ++ i2c_dev = container_of(dev, struct i2c_dev, dev); ++ kfree(i2c_dev); ++} ++ + static int i2cdev_attach_adapter(struct device *dev, void *dummy) + { + struct i2c_adapter *adap; +@@ -644,27 +654,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) + + cdev_init(&i2c_dev->cdev, &i2cdev_fops); + i2c_dev->cdev.owner = THIS_MODULE; +- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); +- if (res) +- goto error_cdev; +- +- /* register this i2c device with the driver core */ +- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, +- MKDEV(I2C_MAJOR, adap->nr), NULL, +- "i2c-%d", adap->nr); +- if (IS_ERR(i2c_dev->dev)) { +- res = PTR_ERR(i2c_dev->dev); +- goto error; ++ ++ device_initialize(&i2c_dev->dev); ++ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); ++ i2c_dev->dev.class = i2c_dev_class; ++ i2c_dev->dev.parent = &adap->dev; ++ i2c_dev->dev.release = i2cdev_dev_release; ++ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); ++ ++ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); ++ if (res) { ++ put_i2c_dev(i2c_dev, false); ++ return res; + } + + pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", + adap->name, adap->nr); + return 0; +-error: +- cdev_del(&i2c_dev->cdev); +-error_cdev: +- put_i2c_dev(i2c_dev); +- return res; + } + + static int i2cdev_detach_adapter(struct device *dev, void *dummy) +@@ -680,9 +686,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) + if (!i2c_dev) /* attach_adapter 
must have failed */ + return 0; + +- cdev_del(&i2c_dev->cdev); +- put_i2c_dev(i2c_dev); +- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); ++ put_i2c_dev(i2c_dev, true); + + pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); + return 0; +diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c +index 0e16490eb3a1..5365199a31f4 100644 +--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c ++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c +@@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) + err_rollback_available: + device_remove_file(&pdev->dev, &dev_attr_available_masters); + err_rollback: ++ i2c_demux_deactivate_master(priv); + for (j = 0; j < i; j++) { + of_node_put(priv->chan[j].parent_np); + of_changeset_destroy(&priv->chan[j].chgset); +diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c +index 66d768d971e1..6e429072e44a 100644 +--- a/drivers/iio/accel/sca3000.c ++++ b/drivers/iio/accel/sca3000.c +@@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st, + st->tx[0] = SCA3000_READ_REG(reg_address_high); + ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); + if (ret) { +- dev_err(get_device(&st->us->dev), "problem reading register"); ++ dev_err(&st->us->dev, "problem reading register\n"); + return ret; + } + +diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c +index 9f63ceb15865..94fde39d9ff7 100644 +--- a/drivers/iio/adc/stm32-adc.c ++++ b/drivers/iio/adc/stm32-adc.c +@@ -1757,15 +1757,27 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) + return 0; + } + +-static int stm32_adc_dma_request(struct iio_dev *indio_dev) ++static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev) + { + struct stm32_adc *adc = iio_priv(indio_dev); + struct dma_slave_config config; + int ret; + +- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); +- if (!adc->dma_chan) ++ adc->dma_chan = dma_request_chan(dev, "rx"); ++ if (IS_ERR(adc->dma_chan)) { ++ ret = PTR_ERR(adc->dma_chan); ++ if (ret != -ENODEV) { ++ if (ret != -EPROBE_DEFER) ++ dev_err(dev, ++ "DMA channel request failed with %d\n", ++ ret); ++ return ret; ++ } ++ ++ /* DMA is optional: fall back to IRQ mode */ ++ adc->dma_chan = NULL; + return 0; ++ } + + adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, + STM32_DMA_BUFFER_SIZE, +@@ -1862,7 +1874,7 @@ static int stm32_adc_probe(struct platform_device *pdev) + if (ret < 0) + return ret; + +- ret = stm32_adc_dma_request(indio_dev); ++ ret = stm32_adc_dma_request(dev, indio_dev); + if (ret < 0) + return ret; + +diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c +index 3ae0366a7b58..c2948defa785 100644 +--- a/drivers/iio/adc/stm32-dfsdm-adc.c ++++ b/drivers/iio/adc/stm32-dfsdm-adc.c +@@ -62,7 +62,7 @@ enum sd_converter_type { + + struct stm32_dfsdm_dev_data { + int type; +- int (*init)(struct iio_dev *indio_dev); ++ int (*init)(struct device *dev, struct iio_dev *indio_dev); + unsigned int num_channels; + const struct regmap_config *regmap_cfg; + }; +@@ -1359,13 +1359,18 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev) + } + } + +-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev) ++static int stm32_dfsdm_dma_request(struct device *dev, ++ struct iio_dev *indio_dev) + { + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + +- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); +- if (!adc->dma_chan) +- return -EINVAL; ++ 
adc->dma_chan = dma_request_chan(dev, "rx"); ++ if (IS_ERR(adc->dma_chan)) { ++ int ret = PTR_ERR(adc->dma_chan); ++ ++ adc->dma_chan = NULL; ++ return ret; ++ } + + adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, + DFSDM_DMA_BUFFER_SIZE, +@@ -1415,7 +1420,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, + &adc->dfsdm->ch_list[ch->channel]); + } + +-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) ++static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) + { + struct iio_chan_spec *ch; + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +@@ -1442,10 +1447,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) + indio_dev->num_channels = 1; + indio_dev->channels = ch; + +- return stm32_dfsdm_dma_request(indio_dev); ++ return stm32_dfsdm_dma_request(dev, indio_dev); + } + +-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) ++static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev) + { + struct iio_chan_spec *ch; + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +@@ -1489,8 +1494,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) + init_completion(&adc->completion); + + /* Optionally request DMA */ +- if (stm32_dfsdm_dma_request(indio_dev)) { +- dev_dbg(&indio_dev->dev, "No DMA support\n"); ++ ret = stm32_dfsdm_dma_request(dev, indio_dev); ++ if (ret) { ++ if (ret != -ENODEV) { ++ if (ret != -EPROBE_DEFER) ++ dev_err(dev, ++ "DMA channel request failed with %d\n", ++ ret); ++ return ret; ++ } ++ ++ dev_dbg(dev, "No DMA support\n"); + return 0; + } + +@@ -1603,7 +1617,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev) + adc->dfsdm->fl_list[adc->fl_id].sync_mode = val; + + adc->dev_data = dev_data; +- ret = dev_data->init(iio); ++ ret = dev_data->init(dev, iio); + if (ret < 0) + return ret; + +diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c +index abe4b56c847c..8a8792010c20 100644 +--- a/drivers/iio/adc/ti-ads8344.c ++++ b/drivers/iio/adc/ti-ads8344.c +@@ -32,16 +32,17 @@ struct ads8344 { + u8 rx_buf[3]; + }; + +-#define ADS8344_VOLTAGE_CHANNEL(chan, si) \ ++#define ADS8344_VOLTAGE_CHANNEL(chan, addr) \ + { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = chan, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ ++ .address = addr, \ + } + +-#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \ ++#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr) \ + { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ +@@ -50,6 +51,7 @@ struct ads8344 { + .differential = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ ++ .address = addr, \ + } + + static const struct iio_chan_spec ads8344_channels[] = { +@@ -105,7 +107,7 @@ static int ads8344_read_raw(struct iio_dev *iio, + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&adc->lock); +- *value = ads8344_adc_conversion(adc, channel->scan_index, ++ *value = ads8344_adc_conversion(adc, channel->address, + channel->differential); + mutex_unlock(&adc->lock); + if (*value < 0) +diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c +index 0ec4d2609ef9..364925d703db 100644 +--- a/drivers/iio/dac/vf610_dac.c ++++ b/drivers/iio/dac/vf610_dac.c +@@ -225,6 +225,7 @@ static int vf610_dac_probe(struct platform_device *pdev) + return 0; + + error_iio_device_register: ++ vf610_dac_exit(info); + clk_disable_unprepare(info->clk); + + return ret; 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index bc7771498342..32de8e7bb8b4 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -2386,6 +2386,7 @@ static void update_domain(struct protection_domain *domain) + + domain_flush_devices(domain); + domain_flush_tlb_pde(domain); ++ domain_flush_complete(domain); + } + + static int dir2prot(enum dma_data_direction direction) +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index ef14b00fa94b..135ae5222cf3 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -1331,8 +1331,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + } + case IVHD_DEV_ACPI_HID: { + u16 devid; +- u8 hid[ACPIHID_HID_LEN] = {0}; +- u8 uid[ACPIHID_UID_LEN] = {0}; ++ u8 hid[ACPIHID_HID_LEN]; ++ u8 uid[ACPIHID_UID_LEN]; + int ret; + + if (h->type != 0x40) { +@@ -1349,6 +1349,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + } + ++ uid[0] = '\0'; + switch (e->uidf) { + case UID_NOT_PRESENT: + +@@ -1363,8 +1364,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + case UID_IS_CHARACTER: + +- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); +- uid[ACPIHID_UID_LEN - 1] = '\0'; ++ memcpy(uid, &e->uid, e->uidl); ++ uid[e->uidl] = '\0'; + + break; + default: +diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c +index d246d74ec3a5..fdcf2bcae164 100644 +--- a/drivers/ipack/carriers/tpci200.c ++++ b/drivers/ipack/carriers/tpci200.c +@@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200) + "(bn 0x%X, sn 0x%X) failed to map driver user space!", + tpci200->info->pdev->bus->number, + tpci200->info->pdev->devfn); ++ res = -ENOMEM; + goto out_release_mem8_space; + } + +diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c +index cb93a13e1777..97bed45360f0 100644 +--- a/drivers/media/platform/rcar_fdp1.c ++++ b/drivers/media/platform/rcar_fdp1.c +@@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev) + dprintk(fdp1, "FDP1 Version R-Car H3\n"); + break; + case FD1_IP_M3N: +- dprintk(fdp1, "FDP1 Version R-Car M3N\n"); ++ dprintk(fdp1, "FDP1 Version R-Car M3-N\n"); + break; + case FD1_IP_E3: + dprintk(fdp1, "FDP1 Version R-Car E3\n"); +diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c +index b4a66b64f742..1958833b3b74 100644 +--- a/drivers/misc/cardreader/rtsx_pcr.c ++++ b/drivers/misc/cardreader/rtsx_pcr.c +@@ -143,6 +143,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) + + rtsx_disable_aspm(pcr); + ++ /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */ ++ msleep(1); ++ + if (option->ltr_enabled) + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 1e3edbbacb1e..c6b163060c76 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); + } + +@@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + 
up_write(&dev->me_clients_rwsem); + } + +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c +index 6cc7ecb0c788..036b9452b19f 100644 +--- a/drivers/mtd/mtdcore.c ++++ b/drivers/mtd/mtdcore.c +@@ -563,7 +563,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) + + config.id = -1; + config.dev = &mtd->dev; +- config.name = mtd->name; ++ config.name = dev_name(&mtd->dev); + config.owner = THIS_MODULE; + config.reg_read = mtd_nvmem_reg_read; + config.size = mtd->size; +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index 8dda51bbdd11..0d21c68bfe24 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -1049,6 +1049,10 @@ static int spinand_init(struct spinand_device *spinand) + + mtd->oobavail = ret; + ++ /* Propagate ECC information to mtd_info */ ++ mtd->ecc_strength = nand->eccreq.strength; ++ mtd->ecc_step_size = nand->eccreq.step_size; ++ + return 0; + + err_cleanup_nanddev: +diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c +index a1dff92ceedf..8a83072401a7 100644 +--- a/drivers/mtd/ubi/debug.c ++++ b/drivers/mtd/ubi/debug.c +@@ -392,9 +392,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos) + { + struct ubi_device *ubi = s->private; + +- if (*pos == 0) +- return SEQ_START_TOKEN; +- + if (*pos < ubi->peb_count) + return pos; + +@@ -408,8 +405,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos) + { + struct ubi_device *ubi = s->private; + +- if (v == SEQ_START_TOKEN) +- return pos; + (*pos)++; + + if (*pos < ubi->peb_count) +@@ -431,11 +426,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter) + int err; + + /* If this is the start, print a header */ +- if (iter == SEQ_START_TOKEN) { +- seq_puts(s, +- "physical_block_number\terase_count\tblock_status\tread_status\n"); +- return 0; +- } ++ if (*block_number == 0) ++ seq_puts(s, "physical_block_number\terase_count\n"); + + err = ubi_io_is_bad(ubi, *block_number); + if (err) +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h +index dc02950a96b8..28412f11a9ca 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h +@@ -68,7 +68,7 @@ + * 16kB. 
+ */ + #if PAGE_SIZE > SZ_16K +-#define ENA_PAGE_SIZE SZ_16K ++#define ENA_PAGE_SIZE (_AC(SZ_16K, UL)) + #else + #define ENA_PAGE_SIZE PAGE_SIZE + #endif +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +index 74b9f3f1da81..0e8264c0b308 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +@@ -56,7 +56,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = { + { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, + +- { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, ++ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, }, + { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, }, +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index e1ab2feeae53..aaa03ce5796f 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -2086,7 +2086,8 @@ static void __ibmvnic_reset(struct work_struct *work) + rc = do_hard_reset(adapter, rwi, reset_state); + rtnl_unlock(); + } +- } else { ++ } else if (!(rwi->reset_reason == VNIC_RESET_FATAL && ++ adapter->from_passive_init)) { + rc = do_reset(adapter, rwi, reset_state); + } + kfree(rwi); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 89a6ae2b17e3..1623516efb17 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3832,7 +3832,7 @@ static int stmmac_set_features(struct net_device *netdev, + /** + * stmmac_interrupt - main ISR + * @irq: interrupt number. +- * @dev_id: to pass the net device pointer. ++ * @dev_id: to pass the net device pointer (must be valid). + * Description: this is the main driver interrupt service routine. 
+ * It can call: + * o DMA service routine (to manage incoming frame reception and transmission +@@ -3856,11 +3856,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); + +- if (unlikely(!dev)) { +- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); +- return IRQ_NONE; +- } +- + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 3a53d222bfcc..d89ec99abcd6 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1172,11 +1172,11 @@ out_unlock: + static struct genl_family gtp_genl_family; + + static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, +- u32 type, struct pdp_ctx *pctx) ++ int flags, u32 type, struct pdp_ctx *pctx) + { + void *genlh; + +- genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, ++ genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, flags, + type); + if (genlh == NULL) + goto nlmsg_failure; +@@ -1230,8 +1230,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) + goto err_unlock; + } + +- err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, +- info->snd_seq, info->nlhdr->nlmsg_type, pctx); ++ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, ++ 0, info->nlhdr->nlmsg_type, pctx); + if (err < 0) + goto err_unlock_free; + +@@ -1274,6 +1274,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, ++ NLM_F_MULTI, + cb->nlh->nlmsg_type, pctx)) { + cb->args[0] = i; + cb->args[1] = j; +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c +index b361c73636a4..0d42477946f3 100644 +--- a/drivers/platform/x86/asus-nb-wmi.c ++++ b/drivers/platform/x86/asus-nb-wmi.c +@@ -514,9 +514,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { + .detect_quirks = asus_nb_wmi_quirks, + }; + ++static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { ++ { ++ /* ++ * asus-nb-wm adds no functionality. The T100TA has a detachable ++ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading ++ * asus-nb-wm causes the camera LED to turn and _stay_ on. 
++ */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), ++ }, ++ }, ++ { ++ /* The Asus T200TA has the same issue as the T100TA */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), ++ }, ++ }, ++ {} /* Terminating entry */ ++}; + + static int __init asus_nb_wmi_init(void) + { ++ if (dmi_check_system(asus_nb_wmi_blacklist)) ++ return -ENODEV; ++ + return asus_wmi_register_driver(&asus_nb_wmi_driver); + } + +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c +index 8155f59ece38..10af330153b5 100644 +--- a/drivers/rapidio/devices/rio_mport_cdev.c ++++ b/drivers/rapidio/devices/rio_mport_cdev.c +@@ -877,6 +877,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); + ret = -EFAULT; ++ /* ++ * Set nr_pages up to mean "how many pages to unpin, in ++ * the error handler: ++ */ ++ nr_pages = pinned; + goto err_pg; + } + +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index 7f66a7783209..59f0f1030c54 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -2320,16 +2320,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) + static int ibmvscsi_remove(struct vio_dev *vdev) + { + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); +- unsigned long flags; + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); +- +- spin_lock_irqsave(hostdata->host->host_lock, flags); + release_event_pool(&hostdata->pool, hostdata); +- spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, + max_events); +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index 1fbc5c6c6c14..bed7e8637217 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -1775,9 +1775,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, + return -EINVAL; + } + +- ql_log(ql_log_info, vha, 0x70d6, +- "port speed:%d\n", ha->link_data_rate); +- + return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); + } + +@@ -2926,11 +2923,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) + test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) + msleep(1000); + +- qla_nvme_delete(vha); + + qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); + ++ qla_nvme_delete(vha); + vha->flags.delete_progress = 1; + + qlt_remove_target(ha, vha); +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index 1ef8907314e5..62a16463f025 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -3117,7 +3117,7 @@ qla24xx_abort_command(srb_t *sp) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, + "Entered %s.\n", __func__); + +- if (vha->flags.qpairs_available && sp->qpair) ++ if (sp->qpair) + req = sp->qpair->req; + else + return QLA_FUNCTION_FAILED; +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c +index 55c51143bb09..4ffb334cd5cd 100644 +--- a/drivers/staging/greybus/uart.c ++++ b/drivers/staging/greybus/uart.c +@@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, + } + + if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) +- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; ++ 
newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; + else +- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; ++ newline.flow_control = 0; + + if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { + memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); +diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c +index 4b25a3a314ed..ed404355ea4c 100644 +--- a/drivers/staging/iio/resolver/ad2s1210.c ++++ b/drivers/staging/iio/resolver/ad2s1210.c +@@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) + static int ad2s1210_config_read(struct ad2s1210_state *st, + unsigned char address) + { +- struct spi_transfer xfer = { +- .len = 2, +- .rx_buf = st->rx, +- .tx_buf = st->tx, ++ struct spi_transfer xfers[] = { ++ { ++ .len = 1, ++ .rx_buf = &st->rx[0], ++ .tx_buf = &st->tx[0], ++ .cs_change = 1, ++ }, { ++ .len = 1, ++ .rx_buf = &st->rx[1], ++ .tx_buf = &st->tx[1], ++ }, + }; + int ret = 0; + + ad2s1210_set_mode(MOD_CONFIG, st); + st->tx[0] = address | AD2S1210_MSB_IS_HIGH; + st->tx[1] = AD2S1210_REG_FAULT; +- ret = spi_sync_transfer(st->sdev, &xfer, 1); ++ ret = spi_sync_transfer(st->sdev, xfers, 2); + if (ret < 0) + return ret; + +diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c +index 871441658f0e..9c67852b19e1 100644 +--- a/drivers/staging/kpc2000/kpc2000/core.c ++++ b/drivers/staging/kpc2000/kpc2000/core.c +@@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, + { + int err = 0; + struct kp2000_device *pcard; +- int rv; + unsigned long reg_bar_phys_addr; + unsigned long reg_bar_phys_len; + unsigned long dma_bar_phys_addr; +@@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, + if (err < 0) + goto err_release_dma; + +- rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, +- pcard->name, pcard); +- if (rv) { ++ err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, ++ pcard->name, pcard); ++ if (err) { + dev_err(&pcard->pdev->dev, +- "%s: failed to request_irq: %d\n", __func__, rv); ++ "%s: failed to request_irq: %d\n", __func__, err); + goto err_disable_msi; + } + +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index d542e26ca56a..7c78a5d02c08 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -3336,6 +3336,7 @@ static void target_tmr_work(struct work_struct *work) + + cmd->se_tfo->queue_tm_rsp(cmd); + ++ transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + +diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c +index d5f81b98e4d7..38133eba83a8 100644 +--- a/drivers/tty/serial/sifive.c ++++ b/drivers/tty/serial/sifive.c +@@ -840,6 +840,7 @@ console_initcall(sifive_console_init); + + static void __ssp_add_console_port(struct sifive_serial_port *ssp) + { ++ spin_lock_init(&ssp->port.lock); + sifive_serial_console_ports[ssp->port.line] = ssp; + } + +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index 02eaac7e1e34..a1ac2f0723b0 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1143,11 +1143,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, + + if (usb_endpoint_out(epaddr)) { + ep = dev->ep_out[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum != 0) + dev->ep_out[epnum] = NULL; + } else { + ep = dev->ep_in[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum 
!= 0) + dev->ep_in[epnum] = NULL; + } + if (ep) { +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index 6c089f655707..ca68a27b98ed 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, + break; + } + +- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); +- added = true; +- +- /* Deliver to monitoring devices all correctly transmitted +- * packets. ++ /* Deliver to monitoring devices all packets that we ++ * will transmit. + */ + virtio_transport_deliver_tap_pkt(pkt); + ++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); ++ added = true; ++ + pkt->off += payload_len; + total_len += payload_len; + +diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c +index e1b9ed679045..02e976ca5732 100644 +--- a/fs/afs/fs_probe.c ++++ b/fs/afs/fs_probe.c +@@ -32,9 +32,8 @@ void afs_fileserver_probe_result(struct afs_call *call) + struct afs_server *server = call->server; + unsigned int server_index = call->server_index; + unsigned int index = call->addr_ix; +- unsigned int rtt = UINT_MAX; ++ unsigned int rtt_us; + bool have_result = false; +- u64 _rtt; + int ret = call->error; + + _enter("%pU,%u", &server->uuid, index); +@@ -93,15 +92,9 @@ responded: + } + } + +- /* Get the RTT and scale it to fit into a 32-bit value that represents +- * over a minute of time so that we can access it with one instruction +- * on a 32-bit system. +- */ +- _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); +- _rtt /= 64; +- rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; +- if (rtt < server->probe.rtt) { +- server->probe.rtt = rtt; ++ rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); ++ if (rtt_us < server->probe.rtt) { ++ server->probe.rtt = rtt_us; + alist->preferred = index; + have_result = true; + } +@@ -113,8 +106,7 @@ out: + spin_unlock(&server->probe_lock); + + _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", +- server_index, index, &alist->addrs[index].transport, +- (unsigned int)rtt, ret); ++ server_index, index, &alist->addrs[index].transport, rtt_us, ret); + + have_result |= afs_fs_probe_done(server); + if (have_result) { +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c +index 6805a469d13c..0a4fed9e706b 100644 +--- a/fs/afs/fsclient.c ++++ b/fs/afs/fsclient.c +@@ -385,8 +385,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) + ASSERTCMP(req->offset, <=, PAGE_SIZE); + if (req->offset == PAGE_SIZE) { + req->offset = 0; +- if (req->page_done) +- req->page_done(req); + req->index++; + if (req->remain > 0) + goto begin_page; +@@ -440,11 +438,13 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) + if (req->offset < PAGE_SIZE) + zero_user_segment(req->pages[req->index], + req->offset, PAGE_SIZE); +- if (req->page_done) +- req->page_done(req); + req->offset = 0; + } + ++ if (req->page_done) ++ for (req->index = 0; req->index < req->nr_pages; req->index++) ++ req->page_done(req); ++ + _leave(" = 0 [done]"); + return 0; + } +diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c +index 858498cc1b05..e3aa013c2177 100644 +--- a/fs/afs/vl_probe.c ++++ b/fs/afs/vl_probe.c +@@ -31,10 +31,9 @@ void afs_vlserver_probe_result(struct afs_call *call) + struct afs_addr_list *alist = call->alist; + struct afs_vlserver *server = call->vlserver; + unsigned int server_index = call->server_index; ++ unsigned int rtt_us = 0; + unsigned int index = call->addr_ix; +- unsigned int rtt = UINT_MAX; + bool have_result = false; +- u64 _rtt; + int ret = call->error; + + 
_enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); +@@ -93,15 +92,9 @@ responded: + } + } + +- /* Get the RTT and scale it to fit into a 32-bit value that represents +- * over a minute of time so that we can access it with one instruction +- * on a 32-bit system. +- */ +- _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); +- _rtt /= 64; +- rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; +- if (rtt < server->probe.rtt) { +- server->probe.rtt = rtt; ++ rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); ++ if (rtt_us < server->probe.rtt) { ++ server->probe.rtt = rtt_us; + alist->preferred = index; + have_result = true; + } +@@ -113,8 +106,7 @@ out: + spin_unlock(&server->probe_lock); + + _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", +- server_index, index, &alist->addrs[index].transport, +- (unsigned int)rtt, ret); ++ server_index, index, &alist->addrs[index].transport, rtt_us, ret); + + have_result |= afs_vl_probe_done(server); + if (have_result) { +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c +index 39230880f372..8af7f093305d 100644 +--- a/fs/afs/yfsclient.c ++++ b/fs/afs/yfsclient.c +@@ -497,8 +497,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) + ASSERTCMP(req->offset, <=, PAGE_SIZE); + if (req->offset == PAGE_SIZE) { + req->offset = 0; +- if (req->page_done) +- req->page_done(req); + req->index++; + if (req->remain > 0) + goto begin_page; +@@ -556,11 +554,13 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) + if (req->offset < PAGE_SIZE) + zero_user_segment(req->pages[req->index], + req->offset, PAGE_SIZE); +- if (req->page_done) +- req->page_done(req); + req->offset = 0; + } + ++ if (req->page_done) ++ for (req->index = 0; req->index < req->nr_pages; req->index++) ++ req->page_done(req); ++ + _leave(" = 0 [done]"); + return 0; + } +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 703945cce0e5..2d602c2b0ff6 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -3693,6 +3693,7 @@ retry: + WARN_ON(1); + tsession = NULL; + target = -1; ++ mutex_lock(&session->s_mutex); + } + goto retry; + +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index cf7b7e1d5bd7..cb733652ecca 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1519,6 +1519,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) + spin_lock(&configfs_dirent_lock); + configfs_detach_rollback(dentry); + spin_unlock(&configfs_dirent_lock); ++ config_item_put(parent_item); + return -EINTR; + } + frag->frag_dead = true; +diff --git a/fs/file.c b/fs/file.c +index 3da91a112bab..e5d328335f88 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -70,7 +70,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, + */ + static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) + { +- unsigned int cpy, set; ++ size_t cpy, set; + + BUG_ON(nfdt->max_fds < ofdt->max_fds); + +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 21820a5b388f..0290a22ebccf 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -639,9 +639,6 @@ __acquires(&gl->gl_lockref.lock) + goto out_unlock; + if (nonblock) + goto out_sched; +- smp_mb(); +- if (atomic_read(&gl->gl_revokes) != 0) +- goto out_sched; + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target = gl->gl_demote_state; +diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c +index 8cdbd53d780c..f985a3fbbb36 100644 +--- a/fs/ubifs/auth.c ++++ b/fs/ubifs/auth.c +@@ -79,13 +79,9 @@ int 
ubifs_prepare_auth_node(struct ubifs_info *c, void *node, + struct shash_desc *inhash) + { + struct ubifs_auth_node *auth = node; +- u8 *hash; ++ u8 hash[UBIFS_HASH_ARR_SZ]; + int err; + +- hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); +- if (!hash) +- return -ENOMEM; +- + { + SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); + +@@ -94,21 +90,16 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, + + err = crypto_shash_final(hash_desc, hash); + if (err) +- goto out; ++ return err; + } + + err = ubifs_hash_calc_hmac(c, hash, auth->hmac); + if (err) +- goto out; ++ return err; + + auth->ch.node_type = UBIFS_AUTH_NODE; + ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0); +- +- err = 0; +-out: +- kfree(hash); +- +- return err; ++ return 0; + } + + static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c, +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c +index a771273fba7e..8dada89bbe4d 100644 +--- a/fs/ubifs/file.c ++++ b/fs/ubifs/file.c +@@ -1375,7 +1375,6 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, + struct ubifs_info *c = inode->i_sb->s_fs_info; + struct ubifs_budget_req req = { .dirtied_ino = 1, + .dirtied_ino_d = ALIGN(ui->data_len, 8) }; +- int iflags = I_DIRTY_TIME; + int err, release; + + if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) +@@ -1393,11 +1392,8 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, + if (flags & S_MTIME) + inode->i_mtime = *time; + +- if (!(inode->i_sb->s_flags & SB_LAZYTIME)) +- iflags |= I_DIRTY_SYNC; +- + release = ui->dirty; +- __mark_inode_dirty(inode, iflags); ++ __mark_inode_dirty(inode, I_DIRTY_SYNC); + mutex_unlock(&ui->ui_mutex); + if (release) + ubifs_release_budget(c, &req); +diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c +index b28ac4dfb407..01fcf7975047 100644 +--- a/fs/ubifs/replay.c ++++ b/fs/ubifs/replay.c +@@ -601,18 +601,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, + struct ubifs_scan_node *snod; + int n_nodes = 0; + int err; +- u8 *hash, *hmac; ++ u8 hash[UBIFS_HASH_ARR_SZ]; ++ u8 hmac[UBIFS_HMAC_ARR_SZ]; + + if (!ubifs_authenticated(c)) + return sleb->nodes_cnt; + +- hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); +- hmac = kmalloc(c->hmac_desc_len, GFP_NOFS); +- if (!hash || !hmac) { +- err = -ENOMEM; +- goto out; +- } +- + list_for_each_entry(snod, &sleb->nodes, list) { + + n_nodes++; +@@ -662,9 +656,6 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, + err = 0; + } + out: +- kfree(hash); +- kfree(hmac); +- + return err ? 
err : n_nodes - n_not_auth; + } + +diff --git a/include/linux/filter.h b/include/linux/filter.h +index 0367a75f873b..3bbc72dbc69e 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -770,8 +770,12 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) + + static inline void bpf_prog_lock_ro(struct bpf_prog *fp) + { +- set_vm_flush_reset_perms(fp); +- set_memory_ro((unsigned long)fp, fp->pages); ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++ if (!fp->jited) { ++ set_vm_flush_reset_perms(fp); ++ set_memory_ro((unsigned long)fp, fp->pages); ++ } ++#endif + } + + static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h +index 04e97bab6f28..ab988940bf04 100644 +--- a/include/net/af_rxrpc.h ++++ b/include/net/af_rxrpc.h +@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, + void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); + void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, + struct sockaddr_rxrpc *); +-u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); ++u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); + int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, + rxrpc_user_attach_call_t, unsigned long, gfp_t, + unsigned int); +diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h +index 2ab668461463..f68bc373544a 100644 +--- a/include/net/drop_monitor.h ++++ b/include/net/drop_monitor.h +@@ -19,7 +19,7 @@ struct net_dm_hw_metadata { + struct net_device *input_dev; + }; + +-#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) ++#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR) + void net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata); + #else +diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h +index 5141f8ffbb12..4c1b9bebbd60 100644 +--- a/include/sound/hda_regmap.h ++++ b/include/sound/hda_regmap.h +@@ -24,6 +24,9 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, + unsigned int val); + int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val); ++int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg, ++ unsigned int mask, unsigned int val); ++void snd_hdac_regmap_sync(struct hdac_device *codec); + + /** + * snd_hdac_regmap_encode_verb - encode the verb to a pseudo register +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h +index fb9dce4c6928..44e57bcc4a57 100644 +--- a/include/sound/hdaudio.h ++++ b/include/sound/hdaudio.h +@@ -87,6 +87,7 @@ struct hdac_device { + + /* regmap */ + struct regmap *regmap; ++ struct mutex regmap_lock; + struct snd_array vendor_verbs; + bool lazy_cache:1; /* don't wake up for writes */ + bool caps_overwriting:1; /* caps overwrite being in process */ +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h +index 191fe447f990..ba9efdc848f9 100644 +--- a/include/trace/events/rxrpc.h ++++ b/include/trace/events/rxrpc.h +@@ -1112,18 +1112,17 @@ TRACE_EVENT(rxrpc_rtt_tx, + TRACE_EVENT(rxrpc_rtt_rx, + TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, +- s64 rtt, u8 nr, s64 avg), ++ u32 rtt, u32 rto), + +- TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), ++ TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), + + TP_STRUCT__entry( + __field(unsigned int, call ) + __field(enum rxrpc_rtt_rx_trace, why ) 
+- __field(u8, nr ) + __field(rxrpc_serial_t, send_serial ) + __field(rxrpc_serial_t, resp_serial ) +- __field(s64, rtt ) +- __field(u64, avg ) ++ __field(u32, rtt ) ++ __field(u32, rto ) + ), + + TP_fast_assign( +@@ -1132,18 +1131,16 @@ TRACE_EVENT(rxrpc_rtt_rx, + __entry->send_serial = send_serial; + __entry->resp_serial = resp_serial; + __entry->rtt = rtt; +- __entry->nr = nr; +- __entry->avg = avg; ++ __entry->rto = rto; + ), + +- TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", ++ TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", + __entry->call, + __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), + __entry->send_serial, + __entry->resp_serial, + __entry->rtt, +- __entry->nr, +- __entry->avg) ++ __entry->rto) + ); + + TRACE_EVENT(rxrpc_timer, +@@ -1544,6 +1541,41 @@ TRACE_EVENT(rxrpc_notify_socket, + __entry->serial) + ); + ++TRACE_EVENT(rxrpc_rx_discard_ack, ++ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, ++ rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, ++ rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), ++ ++ TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, ++ prev_pkt, call_ackr_prev), ++ ++ TP_STRUCT__entry( ++ __field(unsigned int, debug_id ) ++ __field(rxrpc_serial_t, serial ) ++ __field(rxrpc_seq_t, first_soft_ack) ++ __field(rxrpc_seq_t, call_ackr_first) ++ __field(rxrpc_seq_t, prev_pkt) ++ __field(rxrpc_seq_t, call_ackr_prev) ++ ), ++ ++ TP_fast_assign( ++ __entry->debug_id = debug_id; ++ __entry->serial = serial; ++ __entry->first_soft_ack = first_soft_ack; ++ __entry->call_ackr_first = call_ackr_first; ++ __entry->prev_pkt = prev_pkt; ++ __entry->call_ackr_prev = call_ackr_prev; ++ ), ++ ++ TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", ++ __entry->debug_id, ++ __entry->serial, ++ __entry->first_soft_ack, ++ __entry->call_ackr_first, ++ __entry->prev_pkt, ++ __entry->call_ackr_prev) ++ ); ++ + #endif /* _TRACE_RXRPC_H */ + + /* This part must be outside protection */ +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index eeaf34d65742..193b6ab74d7f 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -5232,32 +5232,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) + cfs_rq = cfs_rq_of(se); + enqueue_entity(cfs_rq, se, flags); + +- /* +- * end evaluation on encountering a throttled cfs_rq +- * +- * note: in the case of encountering a throttled cfs_rq we will +- * post the final h_nr_running increment below. +- */ +- if (cfs_rq_throttled(cfs_rq)) +- break; + cfs_rq->h_nr_running++; + cfs_rq->idle_h_nr_running += idle_h_nr_running; + ++ /* end evaluation on encountering a throttled cfs_rq */ ++ if (cfs_rq_throttled(cfs_rq)) ++ goto enqueue_throttle; ++ + flags = ENQUEUE_WAKEUP; + } + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); ++ ++ update_load_avg(cfs_rq, se, UPDATE_TG); ++ update_cfs_group(se); ++ + cfs_rq->h_nr_running++; + cfs_rq->idle_h_nr_running += idle_h_nr_running; + ++ /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) +- break; ++ goto enqueue_throttle; + +- update_load_avg(cfs_rq, se, UPDATE_TG); +- update_cfs_group(se); ++ /* ++ * One parent has been throttled and cfs_rq removed from the ++ * list. Add it back to not break the leaf list. 
++ */ ++ if (throttled_hierarchy(cfs_rq)) ++ list_add_leaf_cfs_rq(cfs_rq); + } + ++enqueue_throttle: + if (!se) { + add_nr_running(rq, 1); + /* +@@ -5317,17 +5323,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + cfs_rq = cfs_rq_of(se); + dequeue_entity(cfs_rq, se, flags); + +- /* +- * end evaluation on encountering a throttled cfs_rq +- * +- * note: in the case of encountering a throttled cfs_rq we will +- * post the final h_nr_running decrement below. +- */ +- if (cfs_rq_throttled(cfs_rq)) +- break; + cfs_rq->h_nr_running--; + cfs_rq->idle_h_nr_running -= idle_h_nr_running; + ++ /* end evaluation on encountering a throttled cfs_rq */ ++ if (cfs_rq_throttled(cfs_rq)) ++ goto dequeue_throttle; ++ + /* Don't dequeue parent if it has other entities besides us */ + if (cfs_rq->load.weight) { + /* Avoid re-evaluating load for this entity: */ +@@ -5345,16 +5347,20 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); ++ ++ update_load_avg(cfs_rq, se, UPDATE_TG); ++ update_cfs_group(se); ++ + cfs_rq->h_nr_running--; + cfs_rq->idle_h_nr_running -= idle_h_nr_running; + ++ /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) +- break; ++ goto dequeue_throttle; + +- update_load_avg(cfs_rq, se, UPDATE_TG); +- update_cfs_group(se); + } + ++dequeue_throttle: + if (!se) + sub_nr_running(rq, 1); + +diff --git a/lib/test_printf.c b/lib/test_printf.c +index 5d94cbff2120..d4b711b53942 100644 +--- a/lib/test_printf.c ++++ b/lib/test_printf.c +@@ -212,6 +212,7 @@ test_string(void) + #define PTR_STR "ffff0123456789ab" + #define PTR_VAL_NO_CRNG "(____ptrval____)" + #define ZEROS "00000000" /* hex 32 zero bits */ ++#define ONES "ffffffff" /* hex 32 one bits */ + + static int __init + plain_format(void) +@@ -243,6 +244,7 @@ plain_format(void) + #define PTR_STR "456789ab" + #define PTR_VAL_NO_CRNG "(ptrval)" + #define ZEROS "" ++#define ONES "" + + static int __init + plain_format(void) +@@ -328,14 +330,28 @@ test_hashed(const char *fmt, const void *p) + test(buf, fmt, p); + } + ++/* ++ * NULL pointers aren't hashed. ++ */ + static void __init + null_pointer(void) + { +- test_hashed("%p", NULL); ++ test(ZEROS "00000000", "%p", NULL); + test(ZEROS "00000000", "%px", NULL); + test("(null)", "%pE", NULL); + } + ++/* ++ * Error pointers aren't hashed. ++ */ ++static void __init ++error_pointer(void) ++{ ++ test(ONES "fffffff5", "%p", ERR_PTR(-11)); ++ test(ONES "fffffff5", "%px", ERR_PTR(-11)); ++ test("(efault)", "%pE", ERR_PTR(-11)); ++} ++ + #define PTR_INVALID ((void *)0x000000ab) + + static void __init +@@ -598,6 +614,7 @@ test_pointer(void) + { + plain(); + null_pointer(); ++ error_pointer(); + invalid_pointer(); + symbol_ptr(); + kernel_ptr(); +diff --git a/lib/vsprintf.c b/lib/vsprintf.c +index e78017a3e1bd..fb4af73142b4 100644 +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -746,6 +746,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr, + const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)"; + unsigned long hashval; + ++ /* ++ * Print the real pointer value for NULL and error pointers, ++ * as they are not actual addresses. ++ */ ++ if (IS_ERR_OR_NULL(ptr)) ++ return pointer_string(buf, end, ptr, spec); ++ + /* When debugging early boot use non-cryptographically secure hash. 
*/ + if (unlikely(debug_boot_weak_hash)) { + hashval = hash_long((unsigned long)ptr, 32); +diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile +index 08b43de2383b..f36ffc090f5f 100644 +--- a/mm/kasan/Makefile ++++ b/mm/kasan/Makefile +@@ -14,10 +14,10 @@ CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE) + # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 + # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 + +-CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +-CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +-CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +-CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) ++CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING ++CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING ++CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING ++CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING + + obj-$(CONFIG_KASAN) := common.o init.o report.o + obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o +diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c +index 616f9dd82d12..76a80033e0b7 100644 +--- a/mm/kasan/generic.c ++++ b/mm/kasan/generic.c +@@ -15,7 +15,6 @@ + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +-#define DISABLE_BRANCH_PROFILING + + #include + #include +diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c +index 0e987c9ca052..caf4efd9888c 100644 +--- a/mm/kasan/tags.c ++++ b/mm/kasan/tags.c +@@ -12,7 +12,6 @@ + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +-#define DISABLE_BRANCH_PROFILING + + #include + #include +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c +index 96b2566c298d..e3bdd859c895 100644 +--- a/net/core/flow_dissector.c ++++ b/net/core/flow_dissector.c +@@ -129,12 +129,10 @@ int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + return 0; + } + +-int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) ++static int flow_dissector_bpf_prog_detach(struct net *net) + { + struct bpf_prog *attached; +- struct net *net; + +- net = current->nsproxy->net_ns; + mutex_lock(&flow_dissector_mutex); + attached = rcu_dereference_protected(net->flow_dissector_prog, + lockdep_is_held(&flow_dissector_mutex)); +@@ -169,6 +167,24 @@ static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, + return 0; + } + ++int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) ++{ ++ return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns); ++} ++ ++static void __net_exit flow_dissector_pernet_pre_exit(struct net *net) ++{ ++ /* We're not racing with attach/detach because there are no ++ * references to netns left when pre_exit gets called. 
++ */ ++ if (rcu_access_pointer(net->flow_dissector_prog)) ++ flow_dissector_bpf_prog_detach(net); ++} ++ ++static struct pernet_operations flow_dissector_pernet_ops __net_initdata = { ++ .pre_exit = flow_dissector_pernet_pre_exit, ++}; ++ + /** + * __skb_flow_get_ports - extract the upper layer ports and return them + * @skb: sk_buff to extract the ports from +@@ -1759,7 +1775,7 @@ static int __init init_default_flow_dissectors(void) + skb_flow_dissector_init(&flow_keys_basic_dissector, + flow_keys_basic_dissector_keys, + ARRAY_SIZE(flow_keys_basic_dissector_keys)); +- return 0; +-} + ++ return register_pernet_subsys(&flow_dissector_pernet_ops); ++} + core_initcall(init_default_flow_dissectors); +diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile +index 6ffb7e9887ce..ddd0f95713a9 100644 +--- a/net/rxrpc/Makefile ++++ b/net/rxrpc/Makefile +@@ -25,6 +25,7 @@ rxrpc-y := \ + peer_event.o \ + peer_object.o \ + recvmsg.o \ ++ rtt.o \ + security.o \ + sendmsg.o \ + skbuff.o \ +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h +index 3eb1ab40ca5c..9fe264bec70c 100644 +--- a/net/rxrpc/ar-internal.h ++++ b/net/rxrpc/ar-internal.h +@@ -7,6 +7,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -311,11 +312,14 @@ struct rxrpc_peer { + #define RXRPC_RTT_CACHE_SIZE 32 + spinlock_t rtt_input_lock; /* RTT lock for input routine */ + ktime_t rtt_last_req; /* Time of last RTT request */ +- u64 rtt; /* Current RTT estimate (in nS) */ +- u64 rtt_sum; /* Sum of cache contents */ +- u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */ +- u8 rtt_cursor; /* next entry at which to insert */ +- u8 rtt_usage; /* amount of cache actually used */ ++ unsigned int rtt_count; /* Number of samples we've got */ ++ ++ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ ++ u32 mdev_us; /* medium deviation */ ++ u32 mdev_max_us; /* maximal mdev for the last rtt period */ ++ u32 rttvar_us; /* smoothed mdev_max */ ++ u32 rto_j; /* Retransmission timeout in jiffies */ ++ u8 backoff; /* Backoff timeout */ + + u8 cong_cwnd; /* Congestion window size */ + }; +@@ -1041,7 +1045,6 @@ extern unsigned long rxrpc_idle_ack_delay; + extern unsigned int rxrpc_rx_window_size; + extern unsigned int rxrpc_rx_mtu; + extern unsigned int rxrpc_rx_jumbo_max; +-extern unsigned long rxrpc_resend_timeout; + + extern const s8 rxrpc_ack_priority[]; + +@@ -1069,8 +1072,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *); + * peer_event.c + */ + void rxrpc_error_report(struct sock *); +-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, +- rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); + void rxrpc_peer_keepalive_worker(struct work_struct *); + + /* +@@ -1102,6 +1103,14 @@ extern const struct seq_operations rxrpc_peer_seq_ops; + void rxrpc_notify_socket(struct rxrpc_call *); + int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); + ++/* ++ * rtt.c ++ */ ++void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, ++ rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); ++unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); ++void rxrpc_peer_init_rtt(struct rxrpc_peer *); ++ + /* + * rxkad.c + */ +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c +index 70e44abf106c..b7611cc159e5 100644 +--- a/net/rxrpc/call_accept.c ++++ b/net/rxrpc/call_accept.c +@@ -248,7 +248,7 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + ktime_t now = skb->tstamp; + +- if 
(call->peer->rtt_usage < 3 || ++ if (call->peer->rtt_count < 3 || + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + true, true, +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c +index cedbbb3a7c2e..2a65ac41055f 100644 +--- a/net/rxrpc/call_event.c ++++ b/net/rxrpc/call_event.c +@@ -111,8 +111,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, + } else { + unsigned long now = jiffies, ack_at; + +- if (call->peer->rtt_usage > 0) +- ack_at = nsecs_to_jiffies(call->peer->rtt); ++ if (call->peer->srtt_us != 0) ++ ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3); + else + ack_at = expiry; + +@@ -157,24 +157,18 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) + static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) + { + struct sk_buff *skb; +- unsigned long resend_at; ++ unsigned long resend_at, rto_j; + rxrpc_seq_t cursor, seq, top; +- ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; ++ ktime_t now, max_age, oldest, ack_ts; + int ix; + u8 annotation, anno_type, retrans = 0, unacked = 0; + + _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); + +- if (call->peer->rtt_usage > 1) +- timeout = ns_to_ktime(call->peer->rtt * 3 / 2); +- else +- timeout = ms_to_ktime(rxrpc_resend_timeout); +- min_timeo = ns_to_ktime((1000000000 / HZ) * 4); +- if (ktime_before(timeout, min_timeo)) +- timeout = min_timeo; ++ rto_j = call->peer->rto_j; + + now = ktime_get_real(); +- max_age = ktime_sub(now, timeout); ++ max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); + + spin_lock_bh(&call->lock); + +@@ -219,7 +213,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) + } + + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); +- resend_at += jiffies + rxrpc_resend_timeout; ++ resend_at += jiffies + rto_j; + WRITE_ONCE(call->resend_at, resend_at); + + if (unacked) +@@ -234,7 +228,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) + rxrpc_timer_set_for_resend); + spin_unlock_bh(&call->lock); + ack_ts = ktime_sub(now, call->acks_latest_ts); +- if (ktime_to_ns(ack_ts) < call->peer->rtt) ++ if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3)) + goto out; + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, + rxrpc_propose_ack_ping_for_lost_ack); +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c +index 69e09d69c896..3be4177baf70 100644 +--- a/net/rxrpc/input.c ++++ b/net/rxrpc/input.c +@@ -91,11 +91,11 @@ static void rxrpc_congestion_management(struct rxrpc_call *call, + /* We analyse the number of packets that get ACK'd per RTT + * period and increase the window if we managed to fill it. + */ +- if (call->peer->rtt_usage == 0) ++ if (call->peer->rtt_count == 0) + goto out; + if (ktime_before(skb->tstamp, +- ktime_add_ns(call->cong_tstamp, +- call->peer->rtt))) ++ ktime_add_us(call->cong_tstamp, ++ call->peer->srtt_us >> 3))) + goto out_no_clear_ca; + change = rxrpc_cong_rtt_window_end; + call->cong_tstamp = skb->tstamp; +@@ -802,6 +802,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, + } + } + ++/* ++ * Return true if the ACK is valid - ie. it doesn't appear to have regressed ++ * with respect to the ack state conveyed by preceding ACKs. 
++ */ ++static bool rxrpc_is_ack_valid(struct rxrpc_call *call, ++ rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) ++{ ++ rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); ++ ++ if (after(first_pkt, base)) ++ return true; /* The window advanced */ ++ ++ if (before(first_pkt, base)) ++ return false; /* firstPacket regressed */ ++ ++ if (after_eq(prev_pkt, call->ackr_prev_seq)) ++ return true; /* previousPacket hasn't regressed. */ ++ ++ /* Some rx implementations put a serial number in previousPacket. */ ++ if (after_eq(prev_pkt, base + call->tx_winsize)) ++ return false; ++ return true; ++} ++ + /* + * Process an ACK packet. + * +@@ -865,9 +889,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) + } + + /* Discard any out-of-order or duplicate ACKs (outside lock). */ +- if (before(first_soft_ack, call->ackr_first_seq) || +- before(prev_pkt, call->ackr_prev_seq)) ++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { ++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, ++ first_soft_ack, call->ackr_first_seq, ++ prev_pkt, call->ackr_prev_seq); + return; ++ } + + buf.info.rxMTU = 0; + ioffset = offset + nr_acks + 3; +@@ -878,9 +905,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) + spin_lock(&call->input_lock); + + /* Discard any out-of-order or duplicate ACKs (inside lock). */ +- if (before(first_soft_ack, call->ackr_first_seq) || +- before(prev_pkt, call->ackr_prev_seq)) ++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { ++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, ++ first_soft_ack, call->ackr_first_seq, ++ prev_pkt, call->ackr_prev_seq); + goto out; ++ } + call->acks_latest_ts = skb->tstamp; + + call->ackr_first_seq = first_soft_ack; +diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c +index 214405f75346..d4144fd86f84 100644 +--- a/net/rxrpc/misc.c ++++ b/net/rxrpc/misc.c +@@ -63,11 +63,6 @@ unsigned int rxrpc_rx_mtu = 5692; + */ + unsigned int rxrpc_rx_jumbo_max = 4; + +-/* +- * Time till packet resend (in milliseconds). 
+- */ +-unsigned long rxrpc_resend_timeout = 4 * HZ; +- + const s8 rxrpc_ack_priority[] = { + [0] = 0, + [RXRPC_ACK_DELAY] = 1, +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c +index 90e263c6aa69..f8b632a5c619 100644 +--- a/net/rxrpc/output.c ++++ b/net/rxrpc/output.c +@@ -369,7 +369,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, + (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || + retrans || + call->cong_mode == RXRPC_CALL_SLOW_START || +- (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || ++ (call->peer->rtt_count < 3 && sp->hdr.seq & 1) || + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), + ktime_get_real()))) + whdr.flags |= RXRPC_REQUEST_ACK; +@@ -423,13 +423,10 @@ done: + if (whdr.flags & RXRPC_REQUEST_ACK) { + call->peer->rtt_last_req = skb->tstamp; + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); +- if (call->peer->rtt_usage > 1) { ++ if (call->peer->rtt_count > 1) { + unsigned long nowj = jiffies, ack_lost_at; + +- ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); +- if (ack_lost_at < 1) +- ack_lost_at = 1; +- ++ ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); + ack_lost_at += nowj; + WRITE_ONCE(call->ack_lost_at, ack_lost_at); + rxrpc_reduce_call_timer(call, ack_lost_at, nowj, +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c +index 923b263c401b..b1449d971883 100644 +--- a/net/rxrpc/peer_event.c ++++ b/net/rxrpc/peer_event.c +@@ -295,52 +295,6 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error, + } + } + +-/* +- * Add RTT information to cache. This is called in softirq mode and has +- * exclusive access to the peer RTT data. +- */ +-void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, +- rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, +- ktime_t send_time, ktime_t resp_time) +-{ +- struct rxrpc_peer *peer = call->peer; +- s64 rtt; +- u64 sum = peer->rtt_sum, avg; +- u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage; +- +- rtt = ktime_to_ns(ktime_sub(resp_time, send_time)); +- if (rtt < 0) +- return; +- +- spin_lock(&peer->rtt_input_lock); +- +- /* Replace the oldest datum in the RTT buffer */ +- sum -= peer->rtt_cache[cursor]; +- sum += rtt; +- peer->rtt_cache[cursor] = rtt; +- peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1); +- peer->rtt_sum = sum; +- if (usage < RXRPC_RTT_CACHE_SIZE) { +- usage++; +- peer->rtt_usage = usage; +- } +- +- spin_unlock(&peer->rtt_input_lock); +- +- /* Now recalculate the average */ +- if (usage == RXRPC_RTT_CACHE_SIZE) { +- avg = sum / RXRPC_RTT_CACHE_SIZE; +- } else { +- avg = sum; +- do_div(avg, usage); +- } +- +- /* Don't need to update this under lock */ +- peer->rtt = avg; +- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, +- usage, avg); +-} +- + /* + * Perform keep-alive pings. 
+ */ +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c +index 64830d8c1fdb..efce27802a74 100644 +--- a/net/rxrpc/peer_object.c ++++ b/net/rxrpc/peer_object.c +@@ -224,6 +224,8 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) + spin_lock_init(&peer->rtt_input_lock); + peer->debug_id = atomic_inc_return(&rxrpc_debug_id); + ++ rxrpc_peer_init_rtt(peer); ++ + if (RXRPC_TX_SMSS > 2190) + peer->cong_cwnd = 2; + else if (RXRPC_TX_SMSS > 1095) +@@ -495,14 +497,14 @@ void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call, + EXPORT_SYMBOL(rxrpc_kernel_get_peer); + + /** +- * rxrpc_kernel_get_rtt - Get a call's peer RTT ++ * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT + * @sock: The socket on which the call is in progress. + * @call: The call to query + * +- * Get the call's peer RTT. ++ * Get the call's peer smoothed RTT. + */ +-u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call) ++u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) + { +- return call->peer->rtt; ++ return call->peer->srtt_us >> 3; + } +-EXPORT_SYMBOL(rxrpc_kernel_get_rtt); ++EXPORT_SYMBOL(rxrpc_kernel_get_srtt); +diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c +index b9d053e42821..8b179e3c802a 100644 +--- a/net/rxrpc/proc.c ++++ b/net/rxrpc/proc.c +@@ -222,7 +222,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) + seq_puts(seq, + "Proto Local " + " Remote " +- " Use CW MTU LastUse RTT Rc\n" ++ " Use CW MTU LastUse RTT RTO\n" + ); + return 0; + } +@@ -236,15 +236,15 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) + now = ktime_get_seconds(); + seq_printf(seq, + "UDP %-47.47s %-47.47s %3u" +- " %3u %5u %6llus %12llu %2u\n", ++ " %3u %5u %6llus %8u %8u\n", + lbuff, + rbuff, + atomic_read(&peer->usage), + peer->cong_cwnd, + peer->mtu, + now - peer->last_tx_at, +- peer->rtt, +- peer->rtt_cursor); ++ peer->srtt_us >> 3, ++ jiffies_to_usecs(peer->rto_j)); + + return 0; + } +diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c +new file mode 100644 +index 000000000000..928d8b34a3ee +--- /dev/null ++++ b/net/rxrpc/rtt.c +@@ -0,0 +1,195 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* RTT/RTO calculation. ++ * ++ * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com) ++ * ++ * https://tools.ietf.org/html/rfc6298 ++ * https://tools.ietf.org/html/rfc1122#section-4.2.3.1 ++ * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf ++ */ ++ ++#include ++#include "ar-internal.h" ++ ++#define RXRPC_RTO_MAX ((unsigned)(120 * HZ)) ++#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ ++#define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */ ++#define rxrpc_min_rtt_wlen 300 /* As sysctl_tcp_min_rtt_wlen */ ++ ++static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer) ++{ ++ return 200; ++} ++ ++static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer) ++{ ++ return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us); ++} ++ ++static u32 rxrpc_bound_rto(u32 rto) ++{ ++ return min(rto, RXRPC_RTO_MAX); ++} ++ ++/* ++ * Called to compute a smoothed rtt estimate. The data fed to this ++ * routine either comes from timestamps, or from segments that were ++ * known _not_ to have been retransmitted [see Karn/Partridge ++ * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 ++ * piece by Van Jacobson. ++ * NOTE: the next three routines used to be one big routine. 
++ * To save cycles in the RFC 1323 implementation it was better to break ++ * it up into three procedures. -- erics ++ */ ++static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us) ++{ ++ long m = sample_rtt_us; /* RTT */ ++ u32 srtt = peer->srtt_us; ++ ++ /* The following amusing code comes from Jacobson's ++ * article in SIGCOMM '88. Note that rtt and mdev ++ * are scaled versions of rtt and mean deviation. ++ * This is designed to be as fast as possible ++ * m stands for "measurement". ++ * ++ * On a 1990 paper the rto value is changed to: ++ * RTO = rtt + 4 * mdev ++ * ++ * Funny. This algorithm seems to be very broken. ++ * These formulae increase RTO, when it should be decreased, increase ++ * too slowly, when it should be increased quickly, decrease too quickly ++ * etc. I guess in BSD RTO takes ONE value, so that it is absolutely ++ * does not matter how to _calculate_ it. Seems, it was trap ++ * that VJ failed to avoid. 8) ++ */ ++ if (srtt != 0) { ++ m -= (srtt >> 3); /* m is now error in rtt est */ ++ srtt += m; /* rtt = 7/8 rtt + 1/8 new */ ++ if (m < 0) { ++ m = -m; /* m is now abs(error) */ ++ m -= (peer->mdev_us >> 2); /* similar update on mdev */ ++ /* This is similar to one of Eifel findings. ++ * Eifel blocks mdev updates when rtt decreases. ++ * This solution is a bit different: we use finer gain ++ * for mdev in this case (alpha*beta). ++ * Like Eifel it also prevents growth of rto, ++ * but also it limits too fast rto decreases, ++ * happening in pure Eifel. ++ */ ++ if (m > 0) ++ m >>= 3; ++ } else { ++ m -= (peer->mdev_us >> 2); /* similar update on mdev */ ++ } ++ ++ peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ ++ if (peer->mdev_us > peer->mdev_max_us) { ++ peer->mdev_max_us = peer->mdev_us; ++ if (peer->mdev_max_us > peer->rttvar_us) ++ peer->rttvar_us = peer->mdev_max_us; ++ } ++ } else { ++ /* no previous measure. */ ++ srtt = m << 3; /* take the measured time to be rtt */ ++ peer->mdev_us = m << 1; /* make sure rto = 3*rtt */ ++ peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer)); ++ peer->mdev_max_us = peer->rttvar_us; ++ } ++ ++ peer->srtt_us = max(1U, srtt); ++} ++ ++/* ++ * Calculate rto without backoff. This is the second half of Van Jacobson's ++ * routine referred to above. ++ */ ++static void rxrpc_set_rto(struct rxrpc_peer *peer) ++{ ++ u32 rto; ++ ++ /* 1. If rtt variance happened to be less 50msec, it is hallucination. ++ * It cannot be less due to utterly erratic ACK generation made ++ * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ ++ * to do with delayed acks, because at cwnd>2 true delack timeout ++ * is invisible. Actually, Linux-2.4 also generates erratic ++ * ACKs in some circumstances. ++ */ ++ rto = __rxrpc_set_rto(peer); ++ ++ /* 2. Fixups made earlier cannot be right. ++ * If we do not estimate RTO correctly without them, ++ * all the algo is pure shit and should be replaced ++ * with correct one. It is exactly, which we pretend to do. ++ */ ++ ++ /* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo ++ * guarantees that rto is higher. ++ */ ++ peer->rto_j = rxrpc_bound_rto(rto); ++} ++ ++static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us) ++{ ++ if (rtt_us < 0) ++ return; ++ ++ //rxrpc_update_rtt_min(peer, rtt_us); ++ rxrpc_rtt_estimator(peer, rtt_us); ++ rxrpc_set_rto(peer); ++ ++ /* RFC6298: only reset backoff on valid RTT measurement. */ ++ peer->backoff = 0; ++} ++ ++/* ++ * Add RTT information to cache. 
This is called in softirq mode and has ++ * exclusive access to the peer RTT data. ++ */ ++void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, ++ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, ++ ktime_t send_time, ktime_t resp_time) ++{ ++ struct rxrpc_peer *peer = call->peer; ++ s64 rtt_us; ++ ++ rtt_us = ktime_to_us(ktime_sub(resp_time, send_time)); ++ if (rtt_us < 0) ++ return; ++ ++ spin_lock(&peer->rtt_input_lock); ++ rxrpc_ack_update_rtt(peer, rtt_us); ++ if (peer->rtt_count < 3) ++ peer->rtt_count++; ++ spin_unlock(&peer->rtt_input_lock); ++ ++ trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, ++ peer->srtt_us >> 3, peer->rto_j); ++} ++ ++/* ++ * Get the retransmission timeout to set in jiffies, backing it off each time ++ * we retransmit. ++ */ ++unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans) ++{ ++ u64 timo_j; ++ u8 backoff = READ_ONCE(peer->backoff); ++ ++ timo_j = peer->rto_j; ++ timo_j <<= backoff; ++ if (retrans && timo_j * 2 <= RXRPC_RTO_MAX) ++ WRITE_ONCE(peer->backoff, backoff + 1); ++ ++ if (timo_j < 1) ++ timo_j = 1; ++ ++ return timo_j; ++} ++ ++void rxrpc_peer_init_rtt(struct rxrpc_peer *peer) ++{ ++ peer->rto_j = RXRPC_TIMEOUT_INIT; ++ peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT); ++ peer->backoff = 0; ++ //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U); ++} +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index 098f1f9ec53b..52a24d4ef5d8 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -1148,7 +1148,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, + ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, + &expiry, _abort_code); + if (ret < 0) +- goto temporary_error_free_resp; ++ goto temporary_error_free_ticket; + + /* use the session key from inside the ticket to decrypt the + * response */ +@@ -1230,7 +1230,6 @@ protocol_error: + + temporary_error_free_ticket: + kfree(ticket); +-temporary_error_free_resp: + kfree(response); + temporary_error: + /* Ignore the response packet if we got a temporary error such as +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index 0fcf157aa09f..5e9c43d4a314 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -66,15 +66,14 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, + struct rxrpc_call *call) + { + rxrpc_seq_t tx_start, tx_win; +- signed long rtt2, timeout; +- u64 rtt; ++ signed long rtt, timeout; + +- rtt = READ_ONCE(call->peer->rtt); +- rtt2 = nsecs_to_jiffies64(rtt) * 2; +- if (rtt2 < 2) +- rtt2 = 2; ++ rtt = READ_ONCE(call->peer->srtt_us) >> 3; ++ rtt = usecs_to_jiffies(rtt) * 2; ++ if (rtt < 2) ++ rtt = 2; + +- timeout = rtt2; ++ timeout = rtt; + tx_start = READ_ONCE(call->tx_hard_ack); + + for (;;) { +@@ -92,7 +91,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, + return -EINTR; + + if (tx_win != tx_start) { +- timeout = rtt2; ++ timeout = rtt; + tx_start = tx_win; + } + +@@ -271,16 +270,9 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, + _debug("need instant resend %d", ret); + rxrpc_instant_resend(call, ix); + } else { +- unsigned long now = jiffies, resend_at; ++ unsigned long now = jiffies; ++ unsigned long resend_at = now + call->peer->rto_j; + +- if (call->peer->rtt_usage > 1) +- resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); +- else +- resend_at = rxrpc_resend_timeout; +- if (resend_at < 1) +- resend_at = 1; +- +- resend_at += now; + WRITE_ONCE(call->resend_at, resend_at); + 
rxrpc_reduce_call_timer(call, resend_at, now, + rxrpc_timer_set_for_send); +diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c +index 2bbb38161851..18dade4e6f9a 100644 +--- a/net/rxrpc/sysctl.c ++++ b/net/rxrpc/sysctl.c +@@ -71,15 +71,6 @@ static struct ctl_table rxrpc_sysctl_table[] = { + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, + }, +- { +- .procname = "resend_timeout", +- .data = &rxrpc_resend_timeout, +- .maxlen = sizeof(unsigned long), +- .mode = 0644, +- .proc_handler = proc_doulongvec_ms_jiffies_minmax, +- .extra1 = (void *)&one_jiffy, +- .extra2 = (void *)&max_jiffies, +- }, + + /* Non-time values */ + { +diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile +index aa0d0ec6936d..9e95862f2788 100644 +--- a/scripts/gcc-plugins/Makefile ++++ b/scripts/gcc-plugins/Makefile +@@ -11,6 +11,7 @@ else + HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti + HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb + HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable ++ HOST_EXTRACXXFLAGS += -Wno-format-diag + export HOST_EXTRACXXFLAGS + endif + +diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h +index 17f06079a712..9ad76b7f3f10 100644 +--- a/scripts/gcc-plugins/gcc-common.h ++++ b/scripts/gcc-plugins/gcc-common.h +@@ -35,7 +35,9 @@ + #include "ggc.h" + #include "timevar.h" + ++#if BUILDING_GCC_VERSION < 10000 + #include "params.h" ++#endif + + #if BUILDING_GCC_VERSION <= 4009 + #include "pointer-set.h" +@@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l + return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); + } + ++#if BUILDING_GCC_VERSION < 10000 + template <> + template <> + inline bool is_a_helper::test(const_gimple gs) +@@ -860,6 +863,7 @@ inline bool is_a_helper::test(const_gimple gs) + { + return gs->code == GIMPLE_RETURN; + } ++#endif + + static inline gasm *as_a_gasm(gimple stmt) + { +diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py +index 39db889b874c..c4b991607917 100644 +--- a/scripts/gdb/linux/rbtree.py ++++ b/scripts/gdb/linux/rbtree.py +@@ -12,7 +12,7 @@ rb_node_type = utils.CachedType("struct rb_node") + + def rb_first(root): + if root.type == rb_root_type.get_type(): +- node = node.address.cast(rb_root_type.get_type().pointer()) ++ node = root.address.cast(rb_root_type.get_type().pointer()) + elif root.type != rb_root_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) + +@@ -28,7 +28,7 @@ def rb_first(root): + + def rb_last(root): + if root.type == rb_root_type.get_type(): +- node = node.address.cast(rb_root_type.get_type().pointer()) ++ node = root.address.cast(rb_root_type.get_type().pointer()) + elif root.type != rb_root_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) + +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh +index 408b5c0b99b1..aa1386079f0c 100755 +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -63,12 +63,18 @@ vmlinux_link() + local lds="${objtree}/${KBUILD_LDS}" + local output=${1} + local objects ++ local strip_debug + + info LD ${output} + + # skip output file argument + shift + ++ # The kallsyms linking does not need debug symbols included. 
++ if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then ++ strip_debug=-Wl,--strip-debug ++ fi ++ + if [ "${SRCARCH}" != "um" ]; then + objects="--whole-archive \ + ${KBUILD_VMLINUX_OBJS} \ +@@ -79,6 +85,7 @@ vmlinux_link() + ${@}" + + ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ ++ ${strip_debug#-Wl,} \ + -o ${output} \ + -T ${lds} ${objects} + else +@@ -91,6 +98,7 @@ vmlinux_link() + ${@}" + + ${CC} ${CFLAGS_vmlinux} \ ++ ${strip_debug} \ + -o ${output} \ + -Wl,-T,${lds} \ + ${objects} \ +@@ -106,6 +114,8 @@ gen_btf() + { + local pahole_ver + local bin_arch ++ local bin_format ++ local bin_file + + if ! [ -x "$(command -v ${PAHOLE})" ]; then + echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" +@@ -118,8 +128,9 @@ gen_btf() + return 1 + fi + +- info "BTF" ${2} + vmlinux_link ${1} ++ ++ info "BTF" ${2} + LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} + + # dump .BTF section into raw binary file to link with final vmlinux +@@ -127,11 +138,12 @@ gen_btf() + cut -d, -f1 | cut -d' ' -f2) + bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ + awk '{print $4}') ++ bin_file=.btf.vmlinux.bin + ${OBJCOPY} --change-section-address .BTF=0 \ + --set-section-flags .BTF=alloc -O binary \ +- --only-section=.BTF ${1} .btf.vmlinux.bin ++ --only-section=.BTF ${1} $bin_file + ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ +- --rename-section .data=.BTF .btf.vmlinux.bin ${2} ++ --rename-section .data=.BTF $bin_file ${2} + } + + # Create ${2} .o file with all symbols from the ${1} object file +@@ -166,8 +178,8 @@ kallsyms() + kallsyms_step() + { + kallsymso_prev=${kallsymso} +- kallsymso=.tmp_kallsyms${1}.o +- kallsyms_vmlinux=.tmp_vmlinux${1} ++ kallsyms_vmlinux=.tmp_vmlinux.kallsyms${1} ++ kallsymso=${kallsyms_vmlinux}.o + + vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o} + kallsyms ${kallsyms_vmlinux} ${kallsymso} +@@ -190,7 +202,6 @@ cleanup() + { + rm -f .btf.* + rm -f .tmp_System.map +- rm -f .tmp_kallsyms* + rm -f .tmp_vmlinux* + rm -f System.map + rm -f vmlinux +@@ -253,9 +264,8 @@ ${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo + + btf_vmlinux_bin_o="" + if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then +- if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then +- btf_vmlinux_bin_o=.btf.vmlinux.bin.o +- else ++ btf_vmlinux_bin_o=.btf.vmlinux.bin.o ++ if ! 
gen_btf .tmp_vmlinux.btf $btf_vmlinux_bin_o ; then + echo >&2 "Failed to generate BTF for vmlinux" + echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF" + exit 1 +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index 90d21675c3ad..47e4f2d91df7 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -424,7 +424,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, + */ + error = aa_may_manage_policy(label, ns, mask); + if (error) +- return error; ++ goto end_section; + + data = aa_simple_write_to_buffer(buf, size, size, pos); + error = PTR_ERR(data); +@@ -432,6 +432,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, + error = aa_replace_profiles(ns, label, mask, data); + aa_put_loaddata(data); + } ++end_section: + end_current_label_crit_section(label); + + return error; +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c +index 5a98661a8b46..597732503815 100644 +--- a/security/apparmor/audit.c ++++ b/security/apparmor/audit.c +@@ -197,8 +197,9 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) + rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr, + GFP_KERNEL, true, false); + if (IS_ERR(rule->label)) { ++ int err = PTR_ERR(rule->label); + aa_audit_rule_free(rule); +- return PTR_ERR(rule->label); ++ return err; + } + + *vrule = rule; +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index 039ca71872ce..5dedc0173b02 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -1334,6 +1334,7 @@ int aa_change_profile(const char *fqname, int flags) + ctx->nnp = aa_get_label(label); + + if (!fqname || !*fqname) { ++ aa_put_label(label); + AA_DEBUG("no profile name"); + return -EINVAL; + } +@@ -1352,8 +1353,6 @@ int aa_change_profile(const char *fqname, int flags) + op = OP_CHANGE_PROFILE; + } + +- label = aa_get_current_label(); +- + if (*fqname == '&') { + stack = true; + /* don't have label_parse() do stacking */ +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index d485f6fc908e..cc826c2767a3 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) + { + long rc; + const char *algo; +- struct crypto_shash **tfm; ++ struct crypto_shash **tfm, *tmp_tfm; + struct shash_desc *desc; + + if (type == EVM_XATTR_HMAC) { +@@ -93,31 +93,31 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) + algo = hash_algo_name[hash_algo]; + } + +- if (*tfm == NULL) { +- mutex_lock(&mutex); +- if (*tfm) +- goto out; +- *tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD); +- if (IS_ERR(*tfm)) { +- rc = PTR_ERR(*tfm); +- pr_err("Can not allocate %s (reason: %ld)\n", algo, rc); +- *tfm = NULL; ++ if (*tfm) ++ goto alloc; ++ mutex_lock(&mutex); ++ if (*tfm) ++ goto unlock; ++ ++ tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD); ++ if (IS_ERR(tmp_tfm)) { ++ pr_err("Can not allocate %s (reason: %ld)\n", algo, ++ PTR_ERR(tmp_tfm)); ++ mutex_unlock(&mutex); ++ return ERR_CAST(tmp_tfm); ++ } ++ if (type == EVM_XATTR_HMAC) { ++ rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len); ++ if (rc) { ++ crypto_free_shash(tmp_tfm); + mutex_unlock(&mutex); + return ERR_PTR(rc); + } +- if (type == EVM_XATTR_HMAC) { +- rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len); +- if (rc) { +- crypto_free_shash(*tfm); +- *tfm = NULL; +- mutex_unlock(&mutex); +- 
return ERR_PTR(rc); +- } +- } +-out: +- mutex_unlock(&mutex); + } +- ++ *tfm = tmp_tfm; ++unlock: ++ mutex_unlock(&mutex); ++alloc: + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), + GFP_KERNEL); + if (!desc) +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index 73044fc6a952..ad6cbbccc8d9 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -411,7 +411,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + loff_t i_size; + int rc; + struct file *f = file; +- bool new_file_instance = false, modified_flags = false; ++ bool new_file_instance = false, modified_mode = false; + + /* + * For consistency, fail file's opened with the O_DIRECT flag on +@@ -431,13 +431,13 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + f = dentry_open(&file->f_path, flags, file->f_cred); + if (IS_ERR(f)) { + /* +- * Cannot open the file again, lets modify f_flags ++ * Cannot open the file again, lets modify f_mode + * of original and continue + */ + pr_info_ratelimited("Unable to reopen file for reading.\n"); + f = file; +- f->f_flags |= FMODE_READ; +- modified_flags = true; ++ f->f_mode |= FMODE_READ; ++ modified_mode = true; + } else { + new_file_instance = true; + } +@@ -455,8 +455,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + out: + if (new_file_instance) + fput(f); +- else if (modified_flags) +- f->f_flags &= ~FMODE_READ; ++ else if (modified_mode) ++ f->f_mode &= ~FMODE_READ; + return rc; + } + +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index 2000e8df0301..68571c40d61f 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -340,8 +340,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf, + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, + "policy_update", "signed policy required", + 1, 0); +- if (ima_appraise & IMA_APPRAISE_ENFORCE) +- result = -EACCES; ++ result = -EACCES; + } else { + result = ima_parse_add_rule(data); + } +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index 2236b5e0c1f2..1662573a4030 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -423,6 +423,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, + + no_delta_check: + if (runtime->status->hw_ptr == new_hw_ptr) { ++ runtime->hw_ptr_jiffies = curr_jiffies; + update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); + return 0; + } +diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c +index 9f3e37511408..c946fd8beebc 100644 +--- a/sound/hda/hdac_device.c ++++ b/sound/hda/hdac_device.c +@@ -57,6 +57,7 @@ int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus, + codec->addr = addr; + codec->type = HDA_DEV_CORE; + mutex_init(&codec->widget_lock); ++ mutex_init(&codec->regmap_lock); + pm_runtime_set_active(&codec->dev); + pm_runtime_get_noresume(&codec->dev); + atomic_set(&codec->in_pm, 0); +diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c +index 286361ecd640..2596a881186f 100644 +--- a/sound/hda/hdac_regmap.c ++++ b/sound/hda/hdac_regmap.c +@@ -363,6 +363,7 @@ static const struct regmap_config hda_regmap_cfg = { + .reg_write = hda_reg_write, + .use_single_read = true, + .use_single_write = true, ++ .disable_locking = true, + }; + + /** +@@ -425,12 +426,29 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_add_vendor_verb); + static int reg_raw_write(struct hdac_device *codec, 
unsigned int reg, + unsigned int val) + { ++ int err; ++ ++ mutex_lock(&codec->regmap_lock); + if (!codec->regmap) +- return hda_reg_write(codec, reg, val); ++ err = hda_reg_write(codec, reg, val); + else +- return regmap_write(codec->regmap, reg, val); ++ err = regmap_write(codec->regmap, reg, val); ++ mutex_unlock(&codec->regmap_lock); ++ return err; + } + ++/* a helper macro to call @func_call; retry with power-up if failed */ ++#define CALL_RAW_FUNC(codec, func_call) \ ++ ({ \ ++ int _err = func_call; \ ++ if (_err == -EAGAIN) { \ ++ _err = snd_hdac_power_up_pm(codec); \ ++ if (_err >= 0) \ ++ _err = func_call; \ ++ snd_hdac_power_down_pm(codec); \ ++ } \ ++ _err;}) ++ + /** + * snd_hdac_regmap_write_raw - write a pseudo register with power mgmt + * @codec: the codec object +@@ -442,42 +460,29 @@ static int reg_raw_write(struct hdac_device *codec, unsigned int reg, + int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, + unsigned int val) + { +- int err; +- +- err = reg_raw_write(codec, reg, val); +- if (err == -EAGAIN) { +- err = snd_hdac_power_up_pm(codec); +- if (err >= 0) +- err = reg_raw_write(codec, reg, val); +- snd_hdac_power_down_pm(codec); +- } +- return err; ++ return CALL_RAW_FUNC(codec, reg_raw_write(codec, reg, val)); + } + EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); + + static int reg_raw_read(struct hdac_device *codec, unsigned int reg, + unsigned int *val, bool uncached) + { ++ int err; ++ ++ mutex_lock(&codec->regmap_lock); + if (uncached || !codec->regmap) +- return hda_reg_read(codec, reg, val); ++ err = hda_reg_read(codec, reg, val); + else +- return regmap_read(codec->regmap, reg, val); ++ err = regmap_read(codec->regmap, reg, val); ++ mutex_unlock(&codec->regmap_lock); ++ return err; + } + + static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, + unsigned int reg, unsigned int *val, + bool uncached) + { +- int err; +- +- err = reg_raw_read(codec, reg, val, uncached); +- if (err == -EAGAIN) { +- err = snd_hdac_power_up_pm(codec); +- if (err >= 0) +- err = reg_raw_read(codec, reg, val, uncached); +- snd_hdac_power_down_pm(codec); +- } +- return err; ++ return CALL_RAW_FUNC(codec, reg_raw_read(codec, reg, val, uncached)); + } + + /** +@@ -504,6 +509,35 @@ int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, + return __snd_hdac_regmap_read_raw(codec, reg, val, true); + } + ++static int reg_raw_update(struct hdac_device *codec, unsigned int reg, ++ unsigned int mask, unsigned int val) ++{ ++ unsigned int orig; ++ bool change; ++ int err; ++ ++ mutex_lock(&codec->regmap_lock); ++ if (codec->regmap) { ++ err = regmap_update_bits_check(codec->regmap, reg, mask, val, ++ &change); ++ if (!err) ++ err = change ? 
1 : 0; ++ } else { ++ err = hda_reg_read(codec, reg, &orig); ++ if (!err) { ++ val &= mask; ++ val |= orig & ~mask; ++ if (val != orig) { ++ err = hda_reg_write(codec, reg, val); ++ if (!err) ++ err = 1; ++ } ++ } ++ } ++ mutex_unlock(&codec->regmap_lock); ++ return err; ++} ++ + /** + * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt + * @codec: the codec object +@@ -515,20 +549,58 @@ int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, + */ + int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val) ++{ ++ return CALL_RAW_FUNC(codec, reg_raw_update(codec, reg, mask, val)); ++} ++EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw); ++ ++static int reg_raw_update_once(struct hdac_device *codec, unsigned int reg, ++ unsigned int mask, unsigned int val) + { + unsigned int orig; + int err; + +- val &= mask; +- err = snd_hdac_regmap_read_raw(codec, reg, &orig); +- if (err < 0) +- return err; +- val |= orig & ~mask; +- if (val == orig) +- return 0; +- err = snd_hdac_regmap_write_raw(codec, reg, val); ++ if (!codec->regmap) ++ return reg_raw_update(codec, reg, mask, val); ++ ++ mutex_lock(&codec->regmap_lock); ++ regcache_cache_only(codec->regmap, true); ++ err = regmap_read(codec->regmap, reg, &orig); ++ regcache_cache_only(codec->regmap, false); + if (err < 0) +- return err; +- return 1; ++ err = regmap_update_bits(codec->regmap, reg, mask, val); ++ mutex_unlock(&codec->regmap_lock); ++ return err; + } +-EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw); ++ ++/** ++ * snd_hdac_regmap_update_raw_once - initialize the register value only once ++ * @codec: the codec object ++ * @reg: pseudo register ++ * @mask: bit mask to update ++ * @val: value to update ++ * ++ * Performs the update of the register bits only once when the register ++ * hasn't been initialized yet. Used in HD-audio legacy driver. 
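The hdac_regmap hunks above share one pattern: regmap's internal locking is turned off (.disable_locking = true), every raw accessor takes the new codec->regmap_lock around its whole read-modify-write, and CALL_RAW_FUNC() retries a single -EAGAIN failure after powering the codec up. A rough userspace sketch of that pattern, using a hypothetical device with a pthread mutex and a flag standing in for the runtime-PM helpers (not the real HD-audio API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct fake_codec {
        pthread_mutex_t lock;       /* plays the role of codec->regmap_lock */
        unsigned int regs[256];
        bool powered;               /* stand-in for the runtime-PM state */
    };

    /* Whole read-modify-write under the lock; -EAGAIN while powered down. */
    static int raw_update(struct fake_codec *c, unsigned int reg,
                          unsigned int mask, unsigned int val)
    {
        unsigned int nv;
        int ret;

        pthread_mutex_lock(&c->lock);
        if (!c->powered) {
            pthread_mutex_unlock(&c->lock);
            return -EAGAIN;
        }
        nv = (c->regs[reg] & ~mask) | (val & mask);
        ret = nv != c->regs[reg];   /* 1 if the value changed, like reg_raw_update() */
        c->regs[reg] = nv;
        pthread_mutex_unlock(&c->lock);
        return ret;
    }

    /* Mirror of CALL_RAW_FUNC(): on -EAGAIN, power up, retry once, power down. */
    static int update_with_retry(struct fake_codec *c, unsigned int reg,
                                 unsigned int mask, unsigned int val)
    {
        int err = raw_update(c, reg, mask, val);

        if (err == -EAGAIN) {
            c->powered = true;      /* stand-in for snd_hdac_power_up_pm() */
            err = raw_update(c, reg, mask, val);
            c->powered = false;     /* stand-in for snd_hdac_power_down_pm() */
        }
        return err;
    }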
++ * Returns zero if successful or a negative error code ++ */ ++int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg, ++ unsigned int mask, unsigned int val) ++{ ++ return CALL_RAW_FUNC(codec, reg_raw_update_once(codec, reg, mask, val)); ++} ++EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); ++ ++/** ++ * snd_hdac_regmap_sync - sync out the cached values for PM resume ++ * @codec: the codec object ++ */ ++void snd_hdac_regmap_sync(struct hdac_device *codec) ++{ ++ if (codec->regmap) { ++ mutex_lock(&codec->regmap_lock); ++ regcache_sync(codec->regmap); ++ mutex_unlock(&codec->regmap_lock); ++ } ++} ++EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index 6cb72336433a..07c03c32715a 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -1267,6 +1267,18 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir, + } + EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); + ++static unsigned int encode_amp(struct hda_codec *codec, hda_nid_t nid, ++ int ch, int dir, int idx) ++{ ++ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); ++ ++ /* enable fake mute if no h/w mute but min=mute */ ++ if ((query_amp_caps(codec, nid, dir) & ++ (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) ++ cmd |= AC_AMP_FAKE_MUTE; ++ return cmd; ++} ++ + /** + * snd_hda_codec_amp_update - update the AMP mono value + * @codec: HD-audio codec +@@ -1282,12 +1294,8 @@ EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); + int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx, int mask, int val) + { +- unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); ++ unsigned int cmd = encode_amp(codec, nid, ch, dir, idx); + +- /* enable fake mute if no h/w mute but min=mute */ +- if ((query_amp_caps(codec, nid, dir) & +- (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) +- cmd |= AC_AMP_FAKE_MUTE; + return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val); + } + EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update); +@@ -1335,16 +1343,11 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_amp_stereo); + int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch, + int dir, int idx, int mask, int val) + { +- int orig; ++ unsigned int cmd = encode_amp(codec, nid, ch, dir, idx); + + if (!codec->core.regmap) + return -EINVAL; +- regcache_cache_only(codec->core.regmap, true); +- orig = snd_hda_codec_amp_read(codec, nid, ch, dir, idx); +- regcache_cache_only(codec->core.regmap, false); +- if (orig >= 0) +- return 0; +- return snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val); ++ return snd_hdac_regmap_update_raw_once(&codec->core, cmd, mask, val); + } + EXPORT_SYMBOL_GPL(snd_hda_codec_amp_init); + +@@ -2905,8 +2908,7 @@ static void hda_call_codec_resume(struct hda_codec *codec) + else { + if (codec->patch_ops.init) + codec->patch_ops.init(codec); +- if (codec->core.regmap) +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + } + + if (codec->jackpoll_interval) +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c +index 10d502328b76..6815f9dc8545 100644 +--- a/sound/pci/hda/hda_generic.c ++++ b/sound/pci/hda/hda_generic.c +@@ -4401,7 +4401,7 @@ EXPORT_SYMBOL_GPL(snd_hda_gen_fix_pin_power); + */ + + /* check each pin in the given array; returns true if any of them is plugged */ +-static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins) ++static bool 
detect_jacks(struct hda_codec *codec, int num_pins, const hda_nid_t *pins) + { + int i; + bool present = false; +@@ -4420,7 +4420,7 @@ static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins) + } + + /* standard HP/line-out auto-mute helper */ +-static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins, ++static void do_automute(struct hda_codec *codec, int num_pins, const hda_nid_t *pins, + int *paths, bool mute) + { + struct hda_gen_spec *spec = codec->spec; +@@ -6027,7 +6027,7 @@ int snd_hda_gen_init(struct hda_codec *codec) + /* call init functions of standard auto-mute helpers */ + update_automute_all(codec); + +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + + if (spec->vmaster_mute.sw_kctl && spec->vmaster_mute.hook) + snd_hda_sync_vmaster_hook(&spec->vmaster_mute); +diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h +index 3942e1b528d8..3dca65d79b02 100644 +--- a/sound/pci/hda/hda_local.h ++++ b/sound/pci/hda/hda_local.h +@@ -138,6 +138,8 @@ int snd_hda_codec_reset(struct hda_codec *codec); + void snd_hda_codec_register(struct hda_codec *codec); + void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec); + ++#define snd_hda_regmap_sync(codec) snd_hdac_regmap_sync(&(codec)->core) ++ + enum { + HDA_VMUTE_OFF, + HDA_VMUTE_ON, +diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c +index bc9dd8e6fd86..c64895f99299 100644 +--- a/sound/pci/hda/patch_analog.c ++++ b/sound/pci/hda/patch_analog.c +@@ -389,7 +389,7 @@ static int patch_ad1986a(struct hda_codec *codec) + { + int err; + struct ad198x_spec *spec; +- static hda_nid_t preferred_pairs[] = { ++ static const hda_nid_t preferred_pairs[] = { + 0x1a, 0x03, + 0x1b, 0x03, + 0x1c, 0x04, +@@ -519,9 +519,9 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec) + + static int patch_ad1983(struct hda_codec *codec) + { ++ static const hda_nid_t conn_0c[] = { 0x08 }; ++ static const hda_nid_t conn_0d[] = { 0x09 }; + struct ad198x_spec *spec; +- static hda_nid_t conn_0c[] = { 0x08 }; +- static hda_nid_t conn_0d[] = { 0x09 }; + int err; + + err = alloc_ad_spec(codec); +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index adad3651889e..1e904dd15ab3 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -7803,23 +7803,23 @@ static void sbz_region2_exit(struct hda_codec *codec) + + static void sbz_set_pin_ctl_default(struct hda_codec *codec) + { +- hda_nid_t pins[5] = {0x0B, 0x0C, 0x0E, 0x12, 0x13}; ++ static const hda_nid_t pins[] = {0x0B, 0x0C, 0x0E, 0x12, 0x13}; + unsigned int i; + + snd_hda_codec_write(codec, 0x11, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40); + +- for (i = 0; i < 5; i++) ++ for (i = 0; i < ARRAY_SIZE(pins); i++) + snd_hda_codec_write(codec, pins[i], 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00); + } + + static void ca0132_clear_unsolicited(struct hda_codec *codec) + { +- hda_nid_t pins[7] = {0x0B, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13}; ++ static const hda_nid_t pins[] = {0x0B, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13}; + unsigned int i; + +- for (i = 0; i < 7; i++) { ++ for (i = 0; i < ARRAY_SIZE(pins); i++) { + snd_hda_codec_write(codec, pins[i], 0, + AC_VERB_SET_UNSOLICITED_ENABLE, 0x00); + } +@@ -7843,10 +7843,10 @@ static void sbz_gpio_shutdown_commands(struct hda_codec *codec, int dir, + + static void zxr_dbpro_power_state_shutdown(struct hda_codec *codec) + { +- hda_nid_t pins[7] = {0x05, 0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01}; ++ static const hda_nid_t pins[] = {0x05, 
0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01}; + unsigned int i; + +- for (i = 0; i < 7; i++) ++ for (i = 0; i < ARRAY_SIZE(pins); i++) + snd_hda_codec_write(codec, pins[i], 0, + AC_VERB_SET_POWER_STATE, 0x03); + } +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 1e20e85e9b46..396b5503038a 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -116,7 +116,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec) + } + + static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins, +- hda_nid_t *pins, bool on) ++ const hda_nid_t *pins, bool on) + { + int i; + for (i = 0; i < num_pins; i++) { +@@ -960,10 +960,10 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { + static void add_cx5051_fake_mutes(struct hda_codec *codec) + { + struct conexant_spec *spec = codec->spec; +- static hda_nid_t out_nids[] = { ++ static const hda_nid_t out_nids[] = { + 0x10, 0x11, 0 + }; +- hda_nid_t *p; ++ const hda_nid_t *p; + + for (p = out_nids; *p; p++) + snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT, +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index d48263d1f6a2..d41c91468ab3 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -2359,7 +2359,7 @@ static int generic_hdmi_resume(struct hda_codec *codec) + int pin_idx; + + codec->patch_ops.init(codec); +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 004d2f638cf2..c5bec191e003 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -86,6 +86,14 @@ struct alc_spec { + + unsigned int gpio_mute_led_mask; + unsigned int gpio_mic_led_mask; ++ unsigned int mute_led_coef_idx; ++ unsigned int mute_led_coefbit_mask; ++ unsigned int mute_led_coefbit_on; ++ unsigned int mute_led_coefbit_off; ++ unsigned int mic_led_coef_idx; ++ unsigned int mic_led_coefbit_mask; ++ unsigned int mic_led_coefbit_on; ++ unsigned int mic_led_coefbit_off; + + hda_nid_t headset_mic_pin; + hda_nid_t headphone_mic_pin; +@@ -465,10 +473,10 @@ static void set_eapd(struct hda_codec *codec, hda_nid_t nid, int on) + static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) + { + /* We currently only handle front, HP */ +- static hda_nid_t pins[] = { ++ static const hda_nid_t pins[] = { + 0x0f, 0x10, 0x14, 0x15, 0x17, 0 + }; +- hda_nid_t *p; ++ const hda_nid_t *p; + for (p = pins; *p; p++) + set_eapd(codec, *p, on); + } +@@ -908,7 +916,7 @@ static int alc_resume(struct hda_codec *codec) + if (!spec->no_depop_delay) + msleep(150); /* to avoid pop noise */ + codec->patch_ops.init(codec); +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + hda_call_check_power_status(codec, 0x01); + return 0; + } +@@ -1939,19 +1947,19 @@ static void alc889_fixup_dac_route(struct hda_codec *codec, + { + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + /* fake the connections during parsing the tree */ +- hda_nid_t conn1[2] = { 0x0c, 0x0d }; +- hda_nid_t conn2[2] = { 0x0e, 0x0f }; +- snd_hda_override_conn_list(codec, 0x14, 2, conn1); +- snd_hda_override_conn_list(codec, 0x15, 2, conn1); +- snd_hda_override_conn_list(codec, 0x18, 2, conn2); +- snd_hda_override_conn_list(codec, 0x1a, 2, conn2); ++ static const hda_nid_t conn1[] = { 0x0c, 0x0d }; ++ static const hda_nid_t conn2[] = { 0x0e, 0x0f }; ++ 
snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); ++ snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1); ++ snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn2), conn2); ++ snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn2), conn2); + } else if (action == HDA_FIXUP_ACT_PROBE) { + /* restore the connections */ +- hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 }; +- snd_hda_override_conn_list(codec, 0x14, 5, conn); +- snd_hda_override_conn_list(codec, 0x15, 5, conn); +- snd_hda_override_conn_list(codec, 0x18, 5, conn); +- snd_hda_override_conn_list(codec, 0x1a, 5, conn); ++ static const hda_nid_t conn[] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 }; ++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); ++ snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn); ++ snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn), conn); ++ snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn), conn); + } + } + +@@ -1959,8 +1967,8 @@ static void alc889_fixup_dac_route(struct hda_codec *codec, + static void alc889_fixup_mbp_vref(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { ++ static const hda_nid_t nids[] = { 0x14, 0x15, 0x19 }; + struct alc_spec *spec = codec->spec; +- static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 }; + int i; + + if (action != HDA_FIXUP_ACT_INIT) +@@ -1996,7 +2004,7 @@ static void alc889_fixup_mac_pins(struct hda_codec *codec, + static void alc889_fixup_imac91_vref(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +- static hda_nid_t nids[2] = { 0x18, 0x1a }; ++ static const hda_nid_t nids[] = { 0x18, 0x1a }; + + if (action == HDA_FIXUP_ACT_INIT) + alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); +@@ -2006,7 +2014,7 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec, + static void alc889_fixup_mba11_vref(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +- static hda_nid_t nids[1] = { 0x18 }; ++ static const hda_nid_t nids[] = { 0x18 }; + + if (action == HDA_FIXUP_ACT_INIT) + alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); +@@ -2016,7 +2024,7 @@ static void alc889_fixup_mba11_vref(struct hda_codec *codec, + static void alc889_fixup_mba21_vref(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +- static hda_nid_t nids[2] = { 0x18, 0x19 }; ++ static const hda_nid_t nids[] = { 0x18, 0x19 }; + + if (action == HDA_FIXUP_ACT_INIT) + alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); +@@ -2098,7 +2106,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) + { +- hda_nid_t conn1[1] = { 0x0c }; ++ static const hda_nid_t conn1[] = { 0x0c }; + + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; +@@ -2107,8 +2115,8 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, + /* We therefore want to make sure 0x14 (front headphone) and + * 0x1b (speakers) use the stereo DAC 0x02 + */ +- snd_hda_override_conn_list(codec, 0x14, 1, conn1); +- snd_hda_override_conn_list(codec, 0x1b, 1, conn1); ++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); ++ snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1); + } + + static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, +@@ -2449,6 +2457,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", 
ALC1220_FIXUP_GB_DUAL_CODECS), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), ++ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), +@@ -2464,6 +2473,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), +@@ -3719,8 +3731,8 @@ static void alc5505_dsp_init(struct hda_codec *codec) + } + + #ifdef HALT_REALTEK_ALC5505 +-#define alc5505_dsp_suspend(codec) /* NOP */ +-#define alc5505_dsp_resume(codec) /* NOP */ ++#define alc5505_dsp_suspend(codec) do { } while (0) /* NOP */ ++#define alc5505_dsp_resume(codec) do { } while (0) /* NOP */ + #else + #define alc5505_dsp_suspend(codec) alc5505_dsp_halt(codec) + #define alc5505_dsp_resume(codec) alc5505_dsp_back_from_halt(codec) +@@ -3756,7 +3768,7 @@ static int alc269_resume(struct hda_codec *codec) + msleep(200); + } + +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + hda_call_check_power_status(codec, 0x01); + + /* on some machine, the BIOS will clear the codec gpio data when enter +@@ -4182,6 +4194,111 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec, + } + } + ++/* update mute-LED according to the speaker mute state via COEF bit */ ++static void alc_fixup_mute_led_coefbit_hook(void *private_data, int enabled) ++{ ++ struct hda_codec *codec = private_data; ++ struct alc_spec *spec = codec->spec; ++ ++ if (spec->mute_led_polarity) ++ enabled = !enabled; ++ ++ /* temporarily power up/down for setting COEF bit */ ++ enabled ? 
alc_update_coef_idx(codec, spec->mute_led_coef_idx, ++ spec->mute_led_coefbit_mask, spec->mute_led_coefbit_off) : ++ alc_update_coef_idx(codec, spec->mute_led_coef_idx, ++ spec->mute_led_coefbit_mask, spec->mute_led_coefbit_on); ++} ++ ++static void alc285_fixup_hp_mute_led_coefbit(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ spec->mute_led_polarity = 0; ++ spec->mute_led_coef_idx = 0x0b; ++ spec->mute_led_coefbit_mask = 1<<3; ++ spec->mute_led_coefbit_on = 1<<3; ++ spec->mute_led_coefbit_off = 0; ++ spec->gen.vmaster_mute.hook = alc_fixup_mute_led_coefbit_hook; ++ spec->gen.vmaster_mute_enum = 1; ++ } ++} ++ ++static void alc236_fixup_hp_mute_led_coefbit(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ spec->mute_led_polarity = 0; ++ spec->mute_led_coef_idx = 0x34; ++ spec->mute_led_coefbit_mask = 1<<5; ++ spec->mute_led_coefbit_on = 0; ++ spec->mute_led_coefbit_off = 1<<5; ++ spec->gen.vmaster_mute.hook = alc_fixup_mute_led_coefbit_hook; ++ spec->gen.vmaster_mute_enum = 1; ++ } ++} ++ ++/* turn on/off mic-mute LED per capture hook by coef bit */ ++static void alc_hp_cap_micmute_update(struct hda_codec *codec) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (spec->gen.micmute_led.led_value) ++ alc_update_coef_idx(codec, spec->mic_led_coef_idx, ++ spec->mic_led_coefbit_mask, spec->mic_led_coefbit_on); ++ else ++ alc_update_coef_idx(codec, spec->mic_led_coef_idx, ++ spec->mic_led_coefbit_mask, spec->mic_led_coefbit_off); ++} ++ ++static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ spec->mic_led_coef_idx = 0x19; ++ spec->mic_led_coefbit_mask = 1<<13; ++ spec->mic_led_coefbit_on = 1<<13; ++ spec->mic_led_coefbit_off = 0; ++ snd_hda_gen_add_micmute_led(codec, alc_hp_cap_micmute_update); ++ } ++} ++ ++static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ spec->mic_led_coef_idx = 0x35; ++ spec->mic_led_coefbit_mask = 3<<2; ++ spec->mic_led_coefbit_on = 2<<2; ++ spec->mic_led_coefbit_off = 1<<2; ++ snd_hda_gen_add_micmute_led(codec, alc_hp_cap_micmute_update); ++ } ++} ++ ++static void alc285_fixup_hp_mute_led(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ alc285_fixup_hp_mute_led_coefbit(codec, fix, action); ++ alc285_fixup_hp_coef_micmute_led(codec, fix, action); ++} ++ ++static void alc236_fixup_hp_mute_led(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ alc236_fixup_hp_mute_led_coefbit(codec, fix, action); ++ alc236_fixup_hp_coef_micmute_led(codec, fix, action); ++} ++ + #if IS_REACHABLE(CONFIG_INPUT) + static void gpio2_mic_hotkey_event(struct hda_codec *codec, + struct hda_jack_callback *event) +@@ -5371,7 +5488,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, + * the speaker output becomes too low by some reason on Thinkpads with + * ALC298 codec + */ +- static hda_nid_t preferred_pairs[] = { ++ static const hda_nid_t preferred_pairs[] = { + 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, + 0 + }; +@@ -5632,9 +5749,9 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec, + /* 
DAC node 0x03 is giving mono output. We therefore want to + make sure 0x14 (front speaker) and 0x15 (headphones) use the + stereo DAC, while leaving 0x17 (bass speaker) for node 0x03. */ +- hda_nid_t conn1[2] = { 0x0c }; +- snd_hda_override_conn_list(codec, 0x14, 1, conn1); +- snd_hda_override_conn_list(codec, 0x15, 1, conn1); ++ static const hda_nid_t conn1[] = { 0x0c }; ++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); ++ snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1); + } + } + +@@ -5649,8 +5766,8 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec, + Pin Complex), since Node 0x02 has Amp-out caps, we can adjust + speaker's volume now. */ + +- hda_nid_t conn1[1] = { 0x0c }; +- snd_hda_override_conn_list(codec, 0x17, 1, conn1); ++ static const hda_nid_t conn1[] = { 0x0c }; ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn1), conn1); + } + } + +@@ -5659,8 +5776,8 @@ static void alc295_fixup_disable_dac3(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { + if (action == HDA_FIXUP_ACT_PRE_PROBE) { +- hda_nid_t conn[2] = { 0x02, 0x03 }; +- snd_hda_override_conn_list(codec, 0x17, 2, conn); ++ static const hda_nid_t conn[] = { 0x02, 0x03 }; ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + } + } + +@@ -5669,8 +5786,8 @@ static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { + if (action == HDA_FIXUP_ACT_PRE_PROBE) { +- hda_nid_t conn[1] = { 0x02 }; +- snd_hda_override_conn_list(codec, 0x17, 1, conn); ++ static const hda_nid_t conn[] = { 0x02 }; ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + } + } + +@@ -5757,7 +5874,7 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { + struct alc_spec *spec = codec->spec; +- static hda_nid_t preferred_pairs[] = { ++ static const hda_nid_t preferred_pairs[] = { + 0x21, 0x03, 0x1b, 0x03, 0x16, 0x02, + 0 + }; +@@ -5980,6 +6097,10 @@ enum { + ALC294_FIXUP_ASUS_HPE, + ALC294_FIXUP_ASUS_COEF_1B, + ALC285_FIXUP_HP_GPIO_LED, ++ ALC285_FIXUP_HP_MUTE_LED, ++ ALC236_FIXUP_HP_MUTE_LED, ++ ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ++ ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -7128,6 +7249,30 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_led, + }, ++ [ALC285_FIXUP_HP_MUTE_LED] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_hp_mute_led, ++ }, ++ [ALC236_FIXUP_HP_MUTE_LED] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc236_fixup_hp_mute_led, ++ }, ++ [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = { ++ .type = HDA_FIXUP_VERBS, ++ .v.verbs = (const struct hda_verb[]) { ++ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc5 }, ++ { } ++ }, ++ }, ++ [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -7273,6 +7418,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x877a, 
"HP", ALC285_FIXUP_HP_MUTE_LED), ++ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -7293,6 +7440,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), ++ SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), +@@ -7321,6 +7469,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), ++ SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), ++ SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), +@@ -7937,6 +8087,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x12, 0x90a60130}, + {0x17, 0x90170110}, + {0x21, 0x03211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ++ {0x12, 0x90a60120}, ++ {0x17, 0x90170110}, ++ {0x21, 0x04211030}), ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ++ {0x12, 0x90a60130}, ++ {0x17, 0x90170110}, ++ {0x21, 0x03211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ++ {0x12, 0x90a60130}, ++ {0x17, 0x90170110}, ++ {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + {0x14, 0x90170110}, + {0x21, 0x04211020}), +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 894f3f509e76..4b9300babc7d 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -795,7 +795,7 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity) + static bool has_builtin_speaker(struct hda_codec *codec) + { + struct sigmatel_spec *spec = codec->spec; +- hda_nid_t *nid_pin; ++ const hda_nid_t *nid_pin; + int nids, i; + + if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { +@@ -2182,7 +2182,7 @@ static void hp_envy_ts_fixup_dac_bind(struct hda_codec *codec, + int action) + { + struct sigmatel_spec *spec = codec->spec; +- static hda_nid_t preferred_pairs[] = { ++ static const hda_nid_t preferred_pairs[] = { + 0xd, 0x13, + 0 + }; +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c +index 29dcdb8b36db..7ef8f3105cdb 100644 +--- a/sound/pci/hda/patch_via.c ++++ b/sound/pci/hda/patch_via.c +@@ -396,7 +396,7 @@ static int via_resume(struct hda_codec *codec) + 
/* some delay here to make jack detection working (bko#98921) */ + msleep(10); + codec->patch_ops.init(codec); +- regcache_sync(codec->core.regmap); ++ snd_hda_regmap_sync(codec); + return 0; + } + #endif +@@ -1038,8 +1038,8 @@ static const struct snd_pci_quirk vt2002p_fixups[] = { + */ + static void fix_vt1802_connections(struct hda_codec *codec) + { +- static hda_nid_t conn_24[] = { 0x14, 0x1c }; +- static hda_nid_t conn_33[] = { 0x1c }; ++ static const hda_nid_t conn_24[] = { 0x14, 0x1c }; ++ static const hda_nid_t conn_33[] = { 0x1c }; + + snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24); + snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33); +diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c +index 4b0dea7f7669..2654eebd5663 100644 +--- a/sound/pci/ice1712/ice1712.c ++++ b/sound/pci/ice1712/ice1712.c +@@ -2360,7 +2360,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice) + pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]); + pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]); + pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]); +- if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) { ++ if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 && ++ ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) { + ice->gpio.write_mask = ice->eeprom.gpiomask; + ice->gpio.direction = ice->eeprom.gpiodir; + snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, +diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest +index 144308a757b7..19e9236dec5e 100755 +--- a/tools/testing/selftests/ftrace/ftracetest ++++ b/tools/testing/selftests/ftrace/ftracetest +@@ -17,6 +17,7 @@ echo " -v|--verbose Increase verbosity of test messages" + echo " -vv Alias of -v -v (Show all results in stdout)" + echo " -vvv Alias of -v -v -v (Show all commands immediately)" + echo " --fail-unsupported Treat UNSUPPORTED as a failure" ++echo " --fail-unresolved Treat UNRESOLVED as a failure" + echo " -d|--debug Debug mode (trace all shell commands)" + echo " -l|--logdir Save logs on the " + echo " If is -, all logs output in console only" +@@ -112,6 +113,10 @@ parse_opts() { # opts + UNSUPPORTED_RESULT=1 + shift 1 + ;; ++ --fail-unresolved) ++ UNRESOLVED_RESULT=1 ++ shift 1 ++ ;; + --logdir|-l) + LOG_DIR=$2 + shift 2 +@@ -176,6 +181,7 @@ KEEP_LOG=0 + DEBUG=0 + VERBOSE=0 + UNSUPPORTED_RESULT=0 ++UNRESOLVED_RESULT=0 + STOP_FAILURE=0 + # Parse command-line options + parse_opts $* +@@ -280,7 +286,7 @@ eval_result() { # sigval + $UNRESOLVED) + prlog " [${color_blue}UNRESOLVED${color_reset}]" + UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO" +- return 1 # this is a kind of bug.. something happened. 
++ return $UNRESOLVED_RESULT # depends on use case + ;; + $UNTESTED) + prlog " [${color_blue}UNTESTED${color_reset}]" +diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h +index 4912d23844bc..e31ac9c5ead0 100644 +--- a/tools/testing/selftests/kvm/include/evmcs.h ++++ b/tools/testing/selftests/kvm/include/evmcs.h +@@ -217,8 +217,8 @@ struct hv_enlightened_vmcs { + #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) + +-struct hv_enlightened_vmcs *current_evmcs; +-struct hv_vp_assist_page *current_vp_assist; ++extern struct hv_enlightened_vmcs *current_evmcs; ++extern struct hv_vp_assist_page *current_vp_assist; + + int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); + +diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c +index f6ec97b7eaef..8cc4a59ff369 100644 +--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c ++++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c +@@ -17,6 +17,9 @@ + + bool enable_evmcs; + ++struct hv_enlightened_vmcs *current_evmcs; ++struct hv_vp_assist_page *current_vp_assist; ++ + struct eptPageTableEntry { + uint64_t readable:1; + uint64_t writable:1; diff --git a/patch/kernel/odroidxu4-current/patch-5.4.43-44.patch b/patch/kernel/odroidxu4-current/patch-5.4.43-44.patch new file mode 100644 index 000000000..48842cdd2 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.43-44.patch @@ -0,0 +1,4629 @@ +diff --git a/Makefile b/Makefile +index 7d7cf0082443..ef4697fcb8ea 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 43 ++SUBLEVEL = 44 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S +index fc7ed03d8b93..51b078604978 100644 +--- a/arch/arm/boot/compressed/vmlinux.lds.S ++++ b/arch/arm/boot/compressed/vmlinux.lds.S +@@ -43,7 +43,7 @@ SECTIONS + } + .table : ALIGN(4) { + _table_start = .; +- LONG(ZIMAGE_MAGIC(2)) ++ LONG(ZIMAGE_MAGIC(4)) + LONG(ZIMAGE_MAGIC(0x5a534c4b)) + LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) + LONG(ZIMAGE_MAGIC(_kernel_bss_size)) +diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi +index e4d49731287f..e35398cc60a0 100644 +--- a/arch/arm/boot/dts/bcm-hr2.dtsi ++++ b/arch/arm/boot/dts/bcm-hr2.dtsi +@@ -75,7 +75,7 @@ + timer@20200 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0x20200 0x100>; +- interrupts = ; ++ interrupts = ; + clocks = <&periph_clk>; + }; + +@@ -83,7 +83,7 @@ + compatible = "arm,cortex-a9-twd-timer"; + reg = <0x20600 0x20>; + interrupts = ; ++ IRQ_TYPE_EDGE_RISING)>; + clocks = <&periph_clk>; + }; + +@@ -91,7 +91,7 @@ + compatible = "arm,cortex-a9-twd-wdt"; + reg = <0x20620 0x20>; + interrupts = ; ++ IRQ_TYPE_EDGE_RISING)>; + clocks = <&periph_clk>; + }; + +diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +index 4c3f606e5b8d..f65448c01e31 100644 +--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts ++++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +@@ -24,7 +24,7 @@ + + leds { + act { +- gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; ++ gpios = <&gpio 47 GPIO_ACTIVE_LOW>; + }; + }; + +diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts +index 95b8f2d71821..fb0980190aa0 100644 +--- a/arch/arm/boot/dts/imx6q-b450v3.dts ++++ b/arch/arm/boot/dts/imx6q-b450v3.dts +@@ -65,13 +65,6 @@ + }; + }; 
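The kvm selftests change above (evmcs.h and vmx.c) is the standard cure for a variable that was defined in a header and therefore duplicated in every object including it, which newer toolchains defaulting to -fno-common are likely to reject: keep only an extern declaration in the header and give the single definition to one .c file. In file terms, with hypothetical names:

    /* shared_state.h (hypothetical) */
    extern int shared_counter;  /* declaration only; no storage allocated here */

    /* shared_state.c (hypothetical) */
    int shared_counter;         /* the single definition the linker sees */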
+ +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, +- <&clks IMX6QDL_CLK_PLL3_USB_OTG>; +-}; +- + &ldb { + status = "okay"; + +diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts +index 611cb7ae7e55..8f762d9c5ae9 100644 +--- a/arch/arm/boot/dts/imx6q-b650v3.dts ++++ b/arch/arm/boot/dts/imx6q-b650v3.dts +@@ -65,13 +65,6 @@ + }; + }; + +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, +- <&clks IMX6QDL_CLK_PLL3_USB_OTG>; +-}; +- + &ldb { + status = "okay"; + +diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts +index e4cb118f88c6..1ea64ecf4291 100644 +--- a/arch/arm/boot/dts/imx6q-b850v3.dts ++++ b/arch/arm/boot/dts/imx6q-b850v3.dts +@@ -53,17 +53,6 @@ + }; + }; + +-&clks { +- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, +- <&clks IMX6QDL_CLK_LDB_DI1_SEL>, +- <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, +- <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; +- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, +- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, +- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, +- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>; +-}; +- + &ldb { + fsl,dual-channel; + status = "okay"; +diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi +index fa27dcdf06f1..1938b04199c4 100644 +--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi ++++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi +@@ -377,3 +377,18 @@ + #interrupt-cells = <1>; + }; + }; ++ ++&clks { ++ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, ++ <&clks IMX6QDL_CLK_LDB_DI1_SEL>, ++ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>, ++ <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>; ++ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, ++ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, ++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; ++}; +diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi +index c776321b2cc4..d282a7b638d8 100644 +--- a/arch/arm/boot/dts/rk3036.dtsi ++++ b/arch/arm/boot/dts/rk3036.dtsi +@@ -128,7 +128,7 @@ + assigned-clocks = <&cru SCLK_GPU>; + assigned-clock-rates = <100000000>; + clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>; +- clock-names = "core", "bus"; ++ clock-names = "bus", "core"; + resets = <&cru SRST_GPU>; + status = "disabled"; + }; +diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts +index 5670b33fd1bd..aed879db6c15 100644 +--- a/arch/arm/boot/dts/rk3228-evb.dts ++++ b/arch/arm/boot/dts/rk3228-evb.dts +@@ -46,7 +46,7 @@ + #address-cells = <1>; + #size-cells = <0>; + +- phy: phy@0 { ++ phy: ethernet-phy@0 { + compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; + reg = <0>; + clocks = <&cru SCLK_MAC_PHY>; +diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts +index 679fc2b00e5a..933ef69da32a 100644 +--- a/arch/arm/boot/dts/rk3229-xms6.dts ++++ b/arch/arm/boot/dts/rk3229-xms6.dts +@@ -150,7 +150,7 @@ + #address-cells = <1>; + #size-cells = <0>; + +- phy: phy@0 { ++ phy: ethernet-phy@0 { + compatible = "ethernet-phy-id1234.d400", + "ethernet-phy-ieee802.3-c22"; + reg = <0>; +diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi +index 
340ed6ccb08f..6bb78b19c555 100644 +--- a/arch/arm/boot/dts/rk322x.dtsi ++++ b/arch/arm/boot/dts/rk322x.dtsi +@@ -561,7 +561,7 @@ + "pp1", + "ppmmu1"; + clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>; +- clock-names = "core", "bus"; ++ clock-names = "bus", "core"; + resets = <&cru SRST_GPU_A>; + status = "disabled"; + }; +@@ -1033,7 +1033,7 @@ + }; + }; + +- spi-0 { ++ spi0 { + spi0_clk: spi0-clk { + rockchip,pins = <0 RK_PB1 2 &pcfg_pull_up>; + }; +@@ -1051,7 +1051,7 @@ + }; + }; + +- spi-1 { ++ spi1 { + spi1_clk: spi1-clk { + rockchip,pins = <0 RK_PC7 2 &pcfg_pull_up>; + }; +diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi +index 97307a405e60..bce0b05ef7bf 100644 +--- a/arch/arm/boot/dts/rk3xxx.dtsi ++++ b/arch/arm/boot/dts/rk3xxx.dtsi +@@ -84,7 +84,7 @@ + compatible = "arm,mali-400"; + reg = <0x10090000 0x10000>; + clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>; +- clock-names = "core", "bus"; ++ clock-names = "bus", "core"; + assigned-clocks = <&cru ACLK_GPU>; + assigned-clock-rates = <100000000>; + resets = <&cru SRST_GPU>; +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h +index 99929122dad7..3546d294d55f 100644 +--- a/arch/arm/include/asm/assembler.h ++++ b/arch/arm/include/asm/assembler.h +@@ -18,11 +18,11 @@ + #endif + + #include +-#include + #include + #include + #include + #include ++#include + + #define IOMEM(x) (x) + +@@ -446,79 +446,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) + .size \name , . - \name + .endm + +- .macro csdb +-#ifdef CONFIG_THUMB2_KERNEL +- .inst.w 0xf3af8014 +-#else +- .inst 0xe320f014 +-#endif +- .endm +- +- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +-#ifndef CONFIG_CPU_USE_DOMAINS +- adds \tmp, \addr, #\size - 1 +- sbcscc \tmp, \tmp, \limit +- bcs \bad +-#ifdef CONFIG_CPU_SPECTRE +- movcs \addr, #0 +- csdb +-#endif +-#endif +- .endm +- +- .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +-#ifdef CONFIG_CPU_SPECTRE +- sub \tmp, \limit, #1 +- subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr +- addhs \tmp, \tmp, #1 @ if (tmp >= 0) { +- subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } +- movlo \addr, #0 @ if (tmp < 0) addr = NULL +- csdb +-#endif +- .endm +- +- .macro uaccess_disable, tmp, isb=1 +-#ifdef CONFIG_CPU_SW_DOMAIN_PAN +- /* +- * Whenever we re-enter userspace, the domains should always be +- * set appropriately. +- */ +- mov \tmp, #DACR_UACCESS_DISABLE +- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register +- .if \isb +- instr_sync +- .endif +-#endif +- .endm +- +- .macro uaccess_enable, tmp, isb=1 +-#ifdef CONFIG_CPU_SW_DOMAIN_PAN +- /* +- * Whenever we re-enter userspace, the domains should always be +- * set appropriately. 
+- */ +- mov \tmp, #DACR_UACCESS_ENABLE +- mcr p15, 0, \tmp, c3, c0, 0 +- .if \isb +- instr_sync +- .endif +-#endif +- .endm +- +- .macro uaccess_save, tmp +-#ifdef CONFIG_CPU_SW_DOMAIN_PAN +- mrc p15, 0, \tmp, c3, c0, 0 +- str \tmp, [sp, #SVC_DACR] +-#endif +- .endm +- +- .macro uaccess_restore +-#ifdef CONFIG_CPU_SW_DOMAIN_PAN +- ldr r0, [sp, #SVC_DACR] +- mcr p15, 0, r0, c3, c0, 0 +-#endif +- .endm +- + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro ret\c, reg + #if __LINUX_ARM_ARCH__ < 6 +diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h +new file mode 100644 +index 000000000000..907571fd05c6 +--- /dev/null ++++ b/arch/arm/include/asm/uaccess-asm.h +@@ -0,0 +1,117 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++ ++#ifndef __ASM_UACCESS_ASM_H__ ++#define __ASM_UACCESS_ASM_H__ ++ ++#include ++#include ++#include ++#include ++ ++ .macro csdb ++#ifdef CONFIG_THUMB2_KERNEL ++ .inst.w 0xf3af8014 ++#else ++ .inst 0xe320f014 ++#endif ++ .endm ++ ++ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req ++#ifndef CONFIG_CPU_USE_DOMAINS ++ adds \tmp, \addr, #\size - 1 ++ sbcscc \tmp, \tmp, \limit ++ bcs \bad ++#ifdef CONFIG_CPU_SPECTRE ++ movcs \addr, #0 ++ csdb ++#endif ++#endif ++ .endm ++ ++ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req ++#ifdef CONFIG_CPU_SPECTRE ++ sub \tmp, \limit, #1 ++ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr ++ addhs \tmp, \tmp, #1 @ if (tmp >= 0) { ++ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } ++ movlo \addr, #0 @ if (tmp < 0) addr = NULL ++ csdb ++#endif ++ .endm ++ ++ .macro uaccess_disable, tmp, isb=1 ++#ifdef CONFIG_CPU_SW_DOMAIN_PAN ++ /* ++ * Whenever we re-enter userspace, the domains should always be ++ * set appropriately. ++ */ ++ mov \tmp, #DACR_UACCESS_DISABLE ++ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register ++ .if \isb ++ instr_sync ++ .endif ++#endif ++ .endm ++ ++ .macro uaccess_enable, tmp, isb=1 ++#ifdef CONFIG_CPU_SW_DOMAIN_PAN ++ /* ++ * Whenever we re-enter userspace, the domains should always be ++ * set appropriately. ++ */ ++ mov \tmp, #DACR_UACCESS_ENABLE ++ mcr p15, 0, \tmp, c3, c0, 0 ++ .if \isb ++ instr_sync ++ .endif ++#endif ++ .endm ++ ++#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) ++#define DACR(x...) x ++#else ++#define DACR(x...) ++#endif ++ ++ /* ++ * Save the address limit on entry to a privileged exception. ++ * ++ * If we are using the DACR for kernel access by the user accessors ++ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain ++ * back to client mode, whether or not \disable is set. ++ * ++ * If we are using SW PAN, set the DACR user domain to no access ++ * if \disable is set. 
++ */ ++ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable ++ ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] ++ mov \tmp2, #TASK_SIZE ++ str \tmp2, [\tsk, #TI_ADDR_LIMIT] ++ DACR( mrc p15, 0, \tmp0, c3, c0, 0) ++ DACR( str \tmp0, [sp, #SVC_DACR]) ++ str \tmp1, [sp, #SVC_ADDR_LIMIT] ++ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) ++ /* kernel=client, user=no access */ ++ mov \tmp2, #DACR_UACCESS_DISABLE ++ mcr p15, 0, \tmp2, c3, c0, 0 ++ instr_sync ++ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS) ++ /* kernel=client */ ++ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL) ++ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) ++ mcr p15, 0, \tmp2, c3, c0, 0 ++ instr_sync ++ .endif ++ .endm ++ ++ /* Restore the user access state previously saved by uaccess_entry */ ++ .macro uaccess_exit, tsk, tmp0, tmp1 ++ ldr \tmp1, [sp, #SVC_ADDR_LIMIT] ++ DACR( ldr \tmp0, [sp, #SVC_DACR]) ++ str \tmp1, [\tsk, #TI_ADDR_LIMIT] ++ DACR( mcr p15, 0, \tmp0, c3, c0, 0) ++ .endm ++ ++#undef DACR ++ ++#endif /* __ASM_UACCESS_ASM_H__ */ +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index 858d4e541532..a874b753397e 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + #include "entry-header.S" + #include +@@ -179,15 +180,7 @@ ENDPROC(__und_invalid) + stmia r7, {r2 - r6} + + get_thread_info tsk +- ldr r0, [tsk, #TI_ADDR_LIMIT] +- mov r1, #TASK_SIZE +- str r1, [tsk, #TI_ADDR_LIMIT] +- str r0, [sp, #SVC_ADDR_LIMIT] +- +- uaccess_save r0 +- .if \uaccess +- uaccess_disable r0 +- .endif ++ uaccess_entry tsk, r0, r1, r2, \uaccess + + .if \trace + #ifdef CONFIG_TRACE_IRQFLAGS +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S +index 32051ec5b33f..40db0f9188b6 100644 +--- a/arch/arm/kernel/entry-header.S ++++ b/arch/arm/kernel/entry-header.S +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include + + @ Bad Abort numbers +@@ -217,9 +218,7 @@ + blne trace_hardirqs_off + #endif + .endif +- ldr r1, [sp, #SVC_ADDR_LIMIT] +- uaccess_restore +- str r1, [tsk, #TI_ADDR_LIMIT] ++ uaccess_exit tsk, r0, r1 + + #ifndef CONFIG_THUMB2_KERNEL + @ ARM mode SVC restore +@@ -263,9 +262,7 @@ + @ on the stack remains correct). 
+ @ + .macro svc_exit_via_fiq +- ldr r1, [sp, #SVC_ADDR_LIMIT] +- uaccess_restore +- str r1, [tsk, #TI_ADDR_LIMIT] ++ uaccess_exit tsk, r0, r1 + #ifndef CONFIG_THUMB2_KERNEL + @ ARM mode restore + mov r0, sp +diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +index 15f1842f6df3..5891b7151432 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +@@ -1397,8 +1397,8 @@ + "venc_lt_sel"; + assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>, + <&topckgen CLK_TOP_VENC_LT_SEL>; +- assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>, +- <&topckgen CLK_TOP_UNIVPLL1_D2>; ++ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>, ++ <&topckgen CLK_TOP_VCODECPLL_370P5>; + }; + + vencltsys: clock-controller@19000000 { +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +index 6abc6f4a86cf..05265b38cc02 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +@@ -86,7 +86,7 @@ + assigned-clock-rate = <50000000>; + assigned-clocks = <&cru SCLK_MAC2PHY>; + assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>; +- ++ status = "okay"; + }; + + &i2c1 { +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +index cd97016b7c18..c5d8d1c58291 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +@@ -1881,10 +1881,10 @@ + gpu: gpu@ff9a0000 { + compatible = "rockchip,rk3399-mali", "arm,mali-t860"; + reg = <0x0 0xff9a0000 0x0 0x10000>; +- interrupts = , +- , +- ; +- interrupt-names = "gpu", "job", "mmu"; ++ interrupts = , ++ , ++ ; ++ interrupt-names = "job", "mmu", "gpu"; + clocks = <&cru ACLK_GPU>; + power-domains = <&power RK3399_PD_GPU>; + status = "disabled"; +diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h +index 5056ebb902d1..61d94ec7dd16 100644 +--- a/arch/csky/abiv1/inc/abi/entry.h ++++ b/arch/csky/abiv1/inc/abi/entry.h +@@ -167,8 +167,8 @@ + * BA Reserved C D V + */ + cprcr r6, cpcr30 +- lsri r6, 28 +- lsli r6, 28 ++ lsri r6, 29 ++ lsli r6, 29 + addi r6, 0xe + cpwcr r6, cpcr30 + +diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h +index 111973c6c713..9023828ede97 100644 +--- a/arch/csky/abiv2/inc/abi/entry.h ++++ b/arch/csky/abiv2/inc/abi/entry.h +@@ -225,8 +225,8 @@ + */ + mfcr r6, cr<30, 15> /* Get MSA0 */ + 2: +- lsri r6, 28 +- lsli r6, 28 ++ lsri r6, 29 ++ lsli r6, 29 + addi r6, 0x1ce + mtcr r6, cr<30, 15> /* Set MSA0 */ + +diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h +index eaa1c3403a42..60f8a4112588 100644 +--- a/arch/csky/include/asm/uaccess.h ++++ b/arch/csky/include/asm/uaccess.h +@@ -254,7 +254,7 @@ do { \ + + extern int __get_user_bad(void); + +-#define __copy_user(to, from, n) \ ++#define ___copy_to_user(to, from, n) \ + do { \ + int w0, w1, w2, w3; \ + asm volatile( \ +@@ -289,31 +289,34 @@ do { \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" /* 1B */ \ +- " bf 8f \n" \ ++ " bf 13f \n" \ + " ldb %3, (%2, 0) \n" \ + "6: stb %3, (%1, 0) \n" \ + " addi %2, 1 \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ +- "7: br 8f \n" \ ++ "7: subi %0, 4 \n" \ ++ "8: subi %0, 4 \n" \ ++ "12: subi %0, 4 \n" \ ++ " br 13f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ +- ".long 2b, 7b \n" \ +- ".long 9b, 7b \n" \ +- ".long 10b, 7b \n" \ ++ ".long 2b, 13f \n" \ ++ ".long 4b, 13f \n" \ ++ 
".long 6b, 13f \n" \ ++ ".long 9b, 12b \n" \ ++ ".long 10b, 8b \n" \ + ".long 11b, 7b \n" \ +- ".long 4b, 7b \n" \ +- ".long 6b, 7b \n" \ + ".previous \n" \ +- "8: \n" \ ++ "13: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ + "=r"(w1), "=r"(w2), "=r"(w3) \ + : "0"(n), "1"(to), "2"(from) \ + : "memory"); \ + } while (0) + +-#define __copy_user_zeroing(to, from, n) \ ++#define ___copy_from_user(to, from, n) \ + do { \ + int tmp; \ + int nsave; \ +@@ -356,22 +359,22 @@ do { \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ +- "8: mov %3, %0 \n" \ +- " movi %4, 0 \n" \ +- "9: stb %4, (%1, 0) \n" \ +- " addi %1, 1 \n" \ +- " subi %3, 1 \n" \ +- " cmpnei %3, 0 \n" \ +- " bt 9b \n" \ +- " br 7f \n" \ ++ "8: stw %3, (%1, 0) \n" \ ++ " subi %0, 4 \n" \ ++ " bf 7f \n" \ ++ "9: subi %0, 8 \n" \ ++ " bf 7f \n" \ ++ "13: stw %3, (%1, 8) \n" \ ++ " subi %0, 12 \n" \ ++ " bf 7f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ +- ".long 2b, 8b \n" \ ++ ".long 2b, 7f \n" \ ++ ".long 4b, 7f \n" \ ++ ".long 6b, 7f \n" \ + ".long 10b, 8b \n" \ +- ".long 11b, 8b \n" \ +- ".long 12b, 8b \n" \ +- ".long 4b, 8b \n" \ +- ".long 6b, 8b \n" \ ++ ".long 11b, 9b \n" \ ++ ".long 12b,13b \n" \ + ".previous \n" \ + "7: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ +diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S +index a7a5b67df898..65c55f22532a 100644 +--- a/arch/csky/kernel/entry.S ++++ b/arch/csky/kernel/entry.S +@@ -318,8 +318,6 @@ ENTRY(__switch_to) + + mfcr a2, psr /* Save PSR value */ + stw a2, (a3, THREAD_SR) /* Save PSR in task struct */ +- bclri a2, 6 /* Disable interrupts */ +- mtcr a2, psr + + SAVE_SWITCH_STACK + +diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c +index e68ff375c8f8..ab55e98ee8f6 100644 +--- a/arch/csky/kernel/perf_callchain.c ++++ b/arch/csky/kernel/perf_callchain.c +@@ -12,12 +12,17 @@ struct stackframe { + + static int unwind_frame_kernel(struct stackframe *frame) + { +- if (kstack_end((void *)frame->fp)) ++ unsigned long low = (unsigned long)task_stack_page(current); ++ unsigned long high = low + THREAD_SIZE; ++ ++ if (unlikely(frame->fp < low || frame->fp > high)) + return -EPERM; +- if (frame->fp & 0x3 || frame->fp < TASK_SIZE) ++ ++ if (kstack_end((void *)frame->fp) || frame->fp & 0x3) + return -EPERM; + + *frame = *(struct stackframe *)frame->fp; ++ + if (__kernel_text_address(frame->lr)) { + int graph = 0; + +diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c +index 647a23986fb5..3c9bd645e643 100644 +--- a/arch/csky/lib/usercopy.c ++++ b/arch/csky/lib/usercopy.c +@@ -7,10 +7,7 @@ + unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n) + { +- if (access_ok(from, n)) +- __copy_user_zeroing(to, from, n); +- else +- memset(to, 0, n); ++ ___copy_from_user(to, from, n); + return n; + } + EXPORT_SYMBOL(raw_copy_from_user); +@@ -18,8 +15,7 @@ EXPORT_SYMBOL(raw_copy_from_user); + unsigned long raw_copy_to_user(void *to, const void *from, + unsigned long n) + { +- if (access_ok(to, n)) +- __copy_user(to, from, n); ++ ___copy_to_user(to, from, n); + return n; + } + EXPORT_SYMBOL(raw_copy_to_user); +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c +index ddca8287d43b..3e54484797f6 100644 +--- a/arch/parisc/mm/init.c ++++ b/arch/parisc/mm/init.c +@@ -588,7 +588,7 @@ void __init mem_init(void) + > BITS_PER_LONG); + + high_memory = __va((max_pfn << PAGE_SHIFT)); +- set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); ++ set_max_mapnr(max_low_pfn); + 
memblock_free_all(); + + #ifdef CONFIG_PA11 +diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c +index 0940681d2f68..19e46f4160cc 100644 +--- a/arch/riscv/kernel/stacktrace.c ++++ b/arch/riscv/kernel/stacktrace.c +@@ -63,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + + #else /* !CONFIG_FRAME_POINTER */ + +-static void notrace walk_stackframe(struct task_struct *task, ++void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) + { + unsigned long sp, pc; +diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h +index 00f7cf45e699..8e95aa4b0d17 100644 +--- a/arch/x86/include/asm/dma.h ++++ b/arch/x86/include/asm/dma.h +@@ -74,7 +74,7 @@ + #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) + + /* 4GB broken PCI/AGP hardware bus master zone */ +-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) ++#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + + #ifdef CONFIG_X86_32 + /* The maximum address that we can perform a DMA transfer to on this platform */ +diff --git a/arch/x86/include/uapi/asm/unistd.h b/arch/x86/include/uapi/asm/unistd.h +index 196fdd02b8b1..be5e2e747f50 100644 +--- a/arch/x86/include/uapi/asm/unistd.h ++++ b/arch/x86/include/uapi/asm/unistd.h +@@ -2,8 +2,15 @@ + #ifndef _UAPI_ASM_X86_UNISTD_H + #define _UAPI_ASM_X86_UNISTD_H + +-/* x32 syscall flag bit */ +-#define __X32_SYSCALL_BIT 0x40000000UL ++/* ++ * x32 syscall flag bit. Some user programs expect syscall NR macros ++ * and __X32_SYSCALL_BIT to have type int, even though syscall numbers ++ * are, for practical purposes, unsigned long. ++ * ++ * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right ++ * thing regardless. ++ */ ++#define __X32_SYSCALL_BIT 0x40000000 + + #ifndef __KERNEL__ + # ifdef __i386__ +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c +index e5cb67d67c03..c94fec268ef2 100644 +--- a/arch/x86/kernel/fpu/xstate.c ++++ b/arch/x86/kernel/fpu/xstate.c +@@ -952,18 +952,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) + return true; + } + +-/* +- * This is similar to user_regset_copyout(), but will not add offset to +- * the source data pointer or increment pos, count, kbuf, and ubuf. 
+- */ +-static inline void +-__copy_xstate_to_kernel(void *kbuf, const void *data, +- unsigned int offset, unsigned int size, unsigned int size_total) ++static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count) + { +- if (offset < size_total) { +- unsigned int copy = min(size, size_total - offset); ++ if (*pos < to) { ++ unsigned size = to - *pos; ++ ++ if (size > *count) ++ size = *count; ++ memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size); ++ *kbuf += size; ++ *pos += size; ++ *count -= size; ++ } ++} + +- memcpy(kbuf + offset, data, copy); ++static void copy_part(unsigned offset, unsigned size, void *from, ++ void **kbuf, unsigned *pos, unsigned *count) ++{ ++ fill_gap(offset, kbuf, pos, count); ++ if (size > *count) ++ size = *count; ++ if (size) { ++ memcpy(*kbuf, from, size); ++ *kbuf += size; ++ *pos += size; ++ *count -= size; + } + } + +@@ -976,8 +989,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data, + */ + int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) + { +- unsigned int offset, size; + struct xstate_header header; ++ const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr); ++ unsigned count = size_total; + int i; + + /* +@@ -993,46 +1007,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of + header.xfeatures = xsave->header.xfeatures; + header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; + ++ if (header.xfeatures & XFEATURE_MASK_FP) ++ copy_part(0, off_mxcsr, ++ &xsave->i387, &kbuf, &offset_start, &count); ++ if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)) ++ copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE, ++ &xsave->i387.mxcsr, &kbuf, &offset_start, &count); ++ if (header.xfeatures & XFEATURE_MASK_FP) ++ copy_part(offsetof(struct fxregs_state, st_space), 128, ++ &xsave->i387.st_space, &kbuf, &offset_start, &count); ++ if (header.xfeatures & XFEATURE_MASK_SSE) ++ copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256, ++ &xsave->i387.xmm_space, &kbuf, &offset_start, &count); ++ /* ++ * Fill xsave->i387.sw_reserved value for ptrace frame: ++ */ ++ copy_part(offsetof(struct fxregs_state, sw_reserved), 48, ++ xstate_fx_sw_bytes, &kbuf, &offset_start, &count); + /* + * Copy xregs_state->header: + */ +- offset = offsetof(struct xregs_state, header); +- size = sizeof(header); +- +- __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); ++ copy_part(offsetof(struct xregs_state, header), sizeof(header), ++ &header, &kbuf, &offset_start, &count); + +- for (i = 0; i < XFEATURE_MAX; i++) { ++ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) { + /* + * Copy only in-use xstates: + */ + if ((header.xfeatures >> i) & 1) { + void *src = __raw_xsave_addr(xsave, i); + +- offset = xstate_offsets[i]; +- size = xstate_sizes[i]; +- +- /* The next component has to fit fully into the output buffer: */ +- if (offset + size > size_total) +- break; +- +- __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); ++ copy_part(xstate_offsets[i], xstate_sizes[i], ++ src, &kbuf, &offset_start, &count); + } + + } +- +- if (xfeatures_mxcsr_quirk(header.xfeatures)) { +- offset = offsetof(struct fxregs_state, mxcsr); +- size = MXCSR_AND_FLAGS_SIZE; +- __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); +- } +- +- /* +- * Fill xsave->i387.sw_reserved value for ptrace frame: +- */ +- offset = offsetof(struct fxregs_state, sw_reserved); +- size = sizeof(xstate_fx_sw_bytes); +- +- __copy_xstate_to_kernel(kbuf, 
xstate_fx_sw_bytes, offset, size, size_total); ++ fill_gap(size_total, &kbuf, &offset_start, &count); + + return 0; + } +diff --git a/block/blk-core.c b/block/blk-core.c +index 1075aaff606d..d5e668ec751b 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -886,14 +886,11 @@ generic_make_request_checks(struct bio *bio) + } + + /* +- * Non-mq queues do not honor REQ_NOWAIT, so complete a bio +- * with BLK_STS_AGAIN status in order to catch -EAGAIN and +- * to give a chance to the caller to repeat request gracefully. ++ * For a REQ_NOWAIT based request, return -EOPNOTSUPP ++ * if queue is not a request based queue. + */ +- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { +- status = BLK_STS_AGAIN; +- goto end_io; +- } ++ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) ++ goto not_supported; + + if (should_fail_bio(bio)) + goto end_io; +diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c +index 20877214acff..e3959ff5cb55 100644 +--- a/drivers/clk/qcom/gcc-sm8150.c ++++ b/drivers/clk/qcom/gcc-sm8150.c +@@ -75,8 +75,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = { + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0_out_even", + .parent_data = &(const struct clk_parent_data){ +- .fw_name = "bi_tcxo", +- .name = "bi_tcxo", ++ .hw = &gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_trion_pll_postdiv_ops, +diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c +index a360d3109555..73f567d8022f 100644 +--- a/drivers/clk/ti/clk-33xx.c ++++ b/drivers/clk/ti/clk-33xx.c +@@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = { + }; + + static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = { +- { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, ++ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" }, + { 0 }, + }; + +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c +index 98bc5a4cd5e7..ce1f1d5d7cd5 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c +@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp) + make_tx_data_wr(sk, skb, immdlen, len, + credits_needed, completion); + tp->snd_nxt += len; +- tp->lsndtime = tcp_time_stamp(tp); ++ tp->lsndtime = tcp_jiffies32; + if (completion) + ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; + } else { +diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c +index 9fa6d3a967d2..100575973e1f 100644 +--- a/drivers/gpio/gpio-bcm-kona.c ++++ b/drivers/gpio/gpio-bcm-kona.c +@@ -619,7 +619,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) + + kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(kona_gpio->reg_base)) { +- ret = -ENXIO; ++ ret = PTR_ERR(kona_gpio->reg_base); + goto err_irq_domain; + } + +diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c +index fae327d5b06e..6890d32d9f25 100644 +--- a/drivers/gpio/gpio-exar.c ++++ b/drivers/gpio/gpio-exar.c +@@ -145,8 +145,10 @@ static int gpio_exar_probe(struct platform_device *pdev) + mutex_init(&exar_gpio->lock); + + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); +- if (index < 0) +- goto err_destroy; ++ if (index < 0) { ++ ret = index; ++ goto err_mutex_destroy; ++ } + + sprintf(exar_gpio->name, "exar_gpio%d", index); + exar_gpio->gpio_chip.label = exar_gpio->name; +@@ -173,6 +175,7 @@ static int gpio_exar_probe(struct platform_device *pdev) + + 
err_destroy: + ida_simple_remove(&ida_index, index); ++err_mutex_destroy: + mutex_destroy(&exar_gpio->lock); + return ret; + } +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c +index 9888b62f37af..432c487f77b4 100644 +--- a/drivers/gpio/gpio-pxa.c ++++ b/drivers/gpio/gpio-pxa.c +@@ -663,8 +663,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) + pchip->irq1 = irq1; + + gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); +- if (!gpio_reg_base) +- return -EINVAL; ++ if (IS_ERR(gpio_reg_base)) ++ return PTR_ERR(gpio_reg_base); + + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c +index 8a01d3694b28..cecde5440a39 100644 +--- a/drivers/gpio/gpio-tegra.c ++++ b/drivers/gpio/gpio-tegra.c +@@ -365,6 +365,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) + struct tegra_gpio_info *tgi = bank->tgi; + unsigned int gpio = d->hwirq; + ++ tegra_gpio_irq_mask(d); + gpiochip_unlock_as_irq(&tgi->gc, gpio); + } + +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index a8cf55eb54d8..abdf448b11a3 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -3894,7 +3894,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) + } + } + +- if (test_bit(FLAG_IS_OUT, &desc->flags)) { ++ /* To be valid for IRQ the line needs to be input or open drain */ ++ if (test_bit(FLAG_IS_OUT, &desc->flags) && ++ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { + chip_err(chip, + "%s: tried to flag a GPIO set as output for IRQ\n", + __func__); +@@ -3957,7 +3959,12 @@ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) + + if (!IS_ERR(desc) && + !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { +- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags)); ++ /* ++ * We must not be output when using IRQ UNLESS we are ++ * open drain. ++ */ ++ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && ++ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); + set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); + } + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index 6d021ecc8d59..edb561baf8b9 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -1288,7 +1288,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( + } + + /* Free the BO*/ +- amdgpu_bo_unref(&mem->bo); ++ drm_gem_object_put_unlocked(&mem->bo->tbo.base); + mutex_destroy(&mem->lock); + kfree(mem); + +@@ -1630,7 +1630,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | + AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; + +- (*mem)->bo = amdgpu_bo_ref(bo); ++ drm_gem_object_get(&bo->tbo.base); ++ (*mem)->bo = bo; + (*mem)->va = va; + (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +index 14417cebe38b..6f118292e40f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +@@ -4290,11 +4290,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, + switch (adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI14: +- if (!enable) { +- amdgpu_gfx_off_ctrl(adev, false); +- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); +- } else +- amdgpu_gfx_off_ctrl(adev, true); ++ amdgpu_gfx_off_ctrl(adev, enable); + break; + default: + break; +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index c34ddaa65324..6004fdacc866 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -4839,10 +4839,9 @@ static int gfx_v9_0_set_powergating_state(void *handle, + switch (adev->asic_type) { + case CHIP_RAVEN: + case CHIP_RENOIR: +- if (!enable) { ++ if (!enable) + amdgpu_gfx_off_ctrl(adev, false); +- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); +- } ++ + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); + gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); +@@ -4868,12 +4867,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, + amdgpu_gfx_off_ctrl(adev, true); + break; + case CHIP_VEGA12: +- if (!enable) { +- amdgpu_gfx_off_ctrl(adev, false); +- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); +- } else { +- amdgpu_gfx_off_ctrl(adev, true); +- } ++ amdgpu_gfx_off_ctrl(adev, enable); + break; + default: + break; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 9f30343262f3..9fd12e108a70 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -6951,13 +6951,6 @@ static int dm_update_plane_state(struct dc *dc, + return -EINVAL; + } + +- if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || +- new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) { +- DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", +- new_plane_state->crtc_x, new_plane_state->crtc_y); +- return -EINVAL; +- } +- + return 0; + } + +diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +index 8bb5fbef7de0..9eb3a0dcd1f2 100644 +--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c ++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +@@ -320,12 +320,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, + if (*level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + hwmgr->en_umd_pstate = true; +- amdgpu_device_ip_set_clockgating_state(hwmgr->adev, +- AMD_IP_BLOCK_TYPE_GFX, +- AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); ++ amdgpu_device_ip_set_clockgating_state(hwmgr->adev, ++ AMD_IP_BLOCK_TYPE_GFX, ++ AMD_CG_STATE_UNGATE); + } + } else { + /* exit umd pstate, restore level, enable gfx cg*/ +diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +index a066e9297777..b51a124e505a 100644 +--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c ++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +@@ -1541,12 +1541,12 @@ static int smu_enable_umd_pstate(void *handle, + if (*level & profile_mode_mask) { + 
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; + smu_dpm_ctx->enable_umd_pstate = true; +- amdgpu_device_ip_set_clockgating_state(smu->adev, +- AMD_IP_BLOCK_TYPE_GFX, +- AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(smu->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); ++ amdgpu_device_ip_set_clockgating_state(smu->adev, ++ AMD_IP_BLOCK_TYPE_GFX, ++ AMD_CG_STATE_UNGATE); + } + } else { + /* exit umd pstate, restore level, enable gfx cg*/ +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c +index 9e95f6fd5406..376fca6ca9f4 100644 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c +@@ -467,7 +467,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, + + static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) + { +- struct ingenic_drm *priv = arg; ++ struct ingenic_drm *priv = drm_device_get_priv(arg); + unsigned int state; + + regmap_read(priv->map, JZ_REG_LCD_STATE, &state); +diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c +index 281c81edabc6..dfb122b5e1b7 100644 +--- a/drivers/hwmon/nct7904.c ++++ b/drivers/hwmon/nct7904.c +@@ -356,6 +356,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, + struct nct7904_data *data = dev_get_drvdata(dev); + int ret, temp; + unsigned int reg1, reg2, reg3; ++ s8 temps; + + switch (attr) { + case hwmon_temp_input: +@@ -461,7 +462,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, + + if (ret < 0) + return ret; +- *val = ret * 1000; ++ temps = ret; ++ *val = temps * 1000; + return 0; + } + +diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c +index 1c95fefa1f06..d0580eed3bcb 100644 +--- a/drivers/infiniband/core/rdma_core.c ++++ b/drivers/infiniband/core/rdma_core.c +@@ -160,9 +160,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, + uobj->context = NULL; + + /* +- * For DESTROY the usecnt is held write locked, the caller is expected +- * to put it unlock and put the object when done with it. Only DESTROY +- * can remove the IDR handle. ++ * For DESTROY the usecnt is not changed, the caller is expected to ++ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR ++ * handle. + */ + if (reason != RDMA_REMOVE_DESTROY) + atomic_set(&uobj->usecnt, 0); +@@ -194,7 +194,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, + /* + * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY + * sequence. It should only be used from command callbacks. On success the +- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This ++ * caller must pair this with uobj_put_destroy(). This + * version requires the caller to have already obtained an + * LOOKUP_DESTROY uobject kref. + */ +@@ -205,6 +205,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) + + down_read(&ufile->hw_destroy_rwsem); + ++ /* ++ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left ++ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. ++ * This is because any other concurrent thread can still see the object ++ * in the xarray due to RCU. Leaving it locked ensures nothing else will ++ * touch it. ++ */ + ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); + if (ret) + goto out_unlock; +@@ -223,7 +230,7 @@ out_unlock: + /* + * uobj_get_destroy destroys the HW object and returns a handle to the uobj + * with a NULL object pointer. 
The caller must pair this with +- * uverbs_put_destroy. ++ * uobj_put_destroy(). + */ + struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, + u32 id, struct uverbs_attr_bundle *attrs) +@@ -257,8 +264,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, + uobj = __uobj_get_destroy(obj, id, attrs); + if (IS_ERR(uobj)) + return PTR_ERR(uobj); +- +- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); ++ uobj_put_destroy(uobj); + return 0; + } + +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c +index 2d6a378e8560..b1df93b69df4 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c +@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, + struct rtable *rt; + struct neighbour *neigh; + int rc = arpindex; +- struct net_device *netdev = iwdev->netdev; + __be32 dst_ipaddr = htonl(dst_ip); + __be32 src_ipaddr = htonl(src_ip); + +@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, + return rc; + } + +- if (netif_is_bond_slave(netdev)) +- netdev = netdev_master_upper_dev_get(netdev); +- + neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); + + rcu_read_lock(); +@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, + { + struct neighbour *neigh; + int rc = arpindex; +- struct net_device *netdev = iwdev->netdev; + struct dst_entry *dst; + struct sockaddr_in6 dst_addr; + struct sockaddr_in6 src_addr; +@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, + return rc; + } + +- if (netif_is_bond_slave(netdev)) +- netdev = netdev_master_upper_dev_get(netdev); +- + neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); + + rcu_read_lock(); +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c +index 568b21eb6ea1..021df0654ba7 100644 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c +@@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping linkcontrol sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail; ++ goto bail_link; + } + kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); + +@@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping sl2vl sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_link; ++ goto bail_sl; + } + kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); + +@@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping diag_counters sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_sl; ++ goto bail_diagc; + } + kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); + +@@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + qib_dev_err(dd, + "Skipping Congestion Control sysfs info, (err %d) port %u\n", + ret, port_num); +- goto bail_diagc; ++ goto bail_cc; + } + + kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); +@@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) + &cc_table_bin_attr); + kobject_put(&ppd->pport_cc_kobj); + } ++ kobject_put(&ppd->diagc_kobj); + kobject_put(&ppd->sl2vl_kobj); + kobject_put(&ppd->pport_kobj); + } +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +index e580ae9cc55a..780fd2dfc07e 100644 +--- 
a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +@@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, + !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); + ret = -ENOMEM; +- goto err_free_device; ++ goto err_disable_pdev; + } + + ret = pci_request_regions(pdev, DRV_NAME); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h +index 2aa3457a30ce..0e5f27caf2b2 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h +@@ -377,8 +377,12 @@ struct ipoib_dev_priv { + struct ipoib_rx_buf *rx_ring; + + struct ipoib_tx_buf *tx_ring; ++ /* cyclic ring variables for managing tx_ring, for UD only */ + unsigned int tx_head; + unsigned int tx_tail; ++ /* cyclic ring variables for counting overall outstanding send WRs */ ++ unsigned int global_tx_head; ++ unsigned int global_tx_tail; + struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; + struct ib_ud_wr tx_wr; + struct ib_wc send_wc[MAX_SEND_CQE]; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index c59e00a0881f..9bf0fa30df28 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ + return; + } + +- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) { ++ if ((priv->global_tx_head - priv->global_tx_tail) == ++ ipoib_sendq_size - 1) { + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", + tx->qp->qp_num); + netif_stop_queue(dev); +@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ + } else { + netif_trans_update(dev); + ++tx->tx_head; +- ++priv->tx_head; ++ ++priv->global_tx_head; + } + } + +@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) + netif_tx_lock(dev); + + ++tx->tx_tail; +- ++priv->tx_tail; ++ ++priv->global_tx_tail; + + if (unlikely(netif_queue_stopped(dev) && +- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 && ++ ((priv->global_tx_head - priv->global_tx_tail) <= ++ ipoib_sendq_size >> 1) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) + netif_wake_queue(dev); + +@@ -1232,8 +1234,9 @@ timeout: + dev_kfree_skb_any(tx_req->skb); + netif_tx_lock_bh(p->dev); + ++p->tx_tail; +- ++priv->tx_tail; +- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) && ++ ++priv->global_tx_tail; ++ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= ++ ipoib_sendq_size >> 1) && + netif_queue_stopped(p->dev) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + netif_wake_queue(p->dev); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +index c332b4761816..da3c5315bbb5 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +@@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) + dev_kfree_skb_any(tx_req->skb); + + ++priv->tx_tail; ++ ++priv->global_tx_tail; + + if (unlikely(netif_queue_stopped(dev) && +- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) && ++ ((priv->global_tx_head - priv->global_tx_tail) <= ++ ipoib_sendq_size >> 1) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) + netif_wake_queue(dev); + +@@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, + else 
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; + /* increase the tx_head after send success, but use it for queue state */ +- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) { ++ if ((priv->global_tx_head - priv->global_tx_tail) == ++ ipoib_sendq_size - 1) { + ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); + netif_stop_queue(dev); + } +@@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, + + rc = priv->tx_head; + ++priv->tx_head; ++ ++priv->global_tx_head; + } + return rc; + } +@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) + ipoib_dma_unmap_tx(priv, tx_req); + dev_kfree_skb_any(tx_req->skb); + ++priv->tx_tail; ++ ++priv->global_tx_tail; + } + + for (i = 0; i < ipoib_recvq_size; ++i) { +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index ac0583ff280d..4fd095fd63b6 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -1188,9 +1188,11 @@ static void ipoib_timeout(struct net_device *dev) + + ipoib_warn(priv, "transmit timeout: latency %d msecs\n", + jiffies_to_msecs(jiffies - dev_trans_start(dev))); +- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", +- netif_queue_stopped(dev), +- priv->tx_head, priv->tx_tail); ++ ipoib_warn(priv, ++ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", ++ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, ++ priv->global_tx_head, priv->global_tx_tail); ++ + /* XXX reset QP, etc. */ + } + +@@ -1705,7 +1707,7 @@ static int ipoib_dev_init_default(struct net_device *dev) + goto out_rx_ring_cleanup; + } + +- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ ++ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ + + if (ipoib_transport_dev_init(dev, priv->ca)) { + pr_warn("%s: ipoib_transport_dev_init failed\n", +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index cb6e3a5f509c..0d57e51b8ba1 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on) + return fasync_helper(fd, file, on, &client->fasync); + } + +-static int evdev_flush(struct file *file, fl_owner_t id) +-{ +- struct evdev_client *client = file->private_data; +- struct evdev *evdev = client->evdev; +- +- mutex_lock(&evdev->mutex); +- +- if (evdev->exist && !client->revoked) +- input_flush_device(&evdev->handle, file); +- +- mutex_unlock(&evdev->mutex); +- return 0; +-} +- + static void evdev_free(struct device *dev) + { + struct evdev *evdev = container_of(dev, struct evdev, dev); +@@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file) + unsigned int i; + + mutex_lock(&evdev->mutex); ++ ++ if (evdev->exist && !client->revoked) ++ input_flush_device(&evdev->handle, file); ++ + evdev_ungrab(evdev, client); + mutex_unlock(&evdev->mutex); + +@@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = { + .compat_ioctl = evdev_ioctl_compat, + #endif + .fasync = evdev_fasync, +- .flush = evdev_flush, + .llseek = no_llseek, + }; + +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 6b40a1c68f9f..c77cdb3b62b5 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -458,6 +458,16 @@ static const u8 xboxone_fw2015_init[] = { + 0x05, 0x20, 0x00, 0x01, 0x00 + }; + ++/* ++ * This packet is required for Xbox One S (0x045e:0x02ea) ++ * and Xbox One Elite 
Series 2 (0x045e:0x0b00) pads to ++ * initialize the controller that was previously used in ++ * Bluetooth mode. ++ */ ++static const u8 xboxone_s_init[] = { ++ 0x05, 0x20, 0x00, 0x0f, 0x06 ++}; ++ + /* + * This packet is required for the Titanfall 2 Xbox One pads + * (0x0e6f:0x0165) to finish initialization and for Hori pads +@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { + XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), + XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), ++ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), ++ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), +diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c +index b0ead7199c40..a69dcc3bd30c 100644 +--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c ++++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c +@@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match); + + static struct i2c_driver dir685_tk_i2c_driver = { + .driver = { +- .name = "dlin-dir685-touchkeys", ++ .name = "dlink-dir685-touchkeys", + .of_match_table = of_match_ptr(dir685_tk_of_match), + }, + .probe = dir685_tk_probe, +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c +index 190b9974526b..258d5fe3d395 100644 +--- a/drivers/input/rmi4/rmi_driver.c ++++ b/drivers/input/rmi4/rmi_driver.c +@@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) + + if (count) { + kfree(attn_data.data); +- attn_data.data = NULL; ++ drvdata->attn_data.data = NULL; + } + + if (!kfifo_is_empty(&drvdata->attn_fifo)) +@@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev) + if (data->input) { + rmi_driver_set_input_name(rmi_dev, data->input); + if (!rmi_dev->xport->input) { +- if (input_register_device(data->input)) { ++ retval = input_register_device(data->input); ++ if (retval) { + dev_err(dev, "%s: Failed to register input device.\n", + __func__); + goto err_destroy_functions; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 08e919dbeb5d..7e048b557462 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), + }, + }, ++ { ++ /* Lenovo ThinkPad Twist S230u */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c +index 16d70201de4a..397cb1d3f481 100644 +--- a/drivers/input/touchscreen/usbtouchscreen.c ++++ b/drivers/input/touchscreen/usbtouchscreen.c +@@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = { + #endif + + #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH ++ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, + {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index cd3c0ea56657..9d7232e26ecf 100644 +--- a/drivers/iommu/iommu.c ++++ 
b/drivers/iommu/iommu.c +@@ -492,7 +492,7 @@ struct iommu_group *iommu_group_alloc(void) + NULL, "%d", group->id); + if (ret) { + ida_simple_remove(&iommu_group_ida, group->id); +- kfree(group); ++ kobject_put(&group->kobj); + return ERR_PTR(ret); + } + +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c +index 9d01b5dca519..7f480c6b1981 100644 +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -2475,8 +2475,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, + struct mmc_rpmb_data, chrdev); + +- put_device(&rpmb->dev); + mmc_blk_put(rpmb->md); ++ put_device(&rpmb->dev); + + return 0; + } +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c +index 007481557191..9b8346638f69 100644 +--- a/drivers/net/bonding/bond_sysfs_slave.c ++++ b/drivers/net/bonding/bond_sysfs_slave.c +@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave) + + err = kobject_init_and_add(&slave->kobj, &slave_ktype, + &(slave->dev->dev.kobj), "bonding_slave"); +- if (err) ++ if (err) { ++ kobject_put(&slave->kobj); + return err; ++ } + + for (a = slave_attrs; *a; ++a) { + err = sysfs_create_file(&slave->kobj, &((*a)->attr)); +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index cffaf4fdd772..6027bb65f7f6 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -639,11 +639,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, + mt7530_write(priv, MT7530_PVC_P(port), + PORT_SPEC_TAG); + +- /* Disable auto learning on the cpu port */ +- mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); +- +- /* Unknown unicast frame fordwarding to the cpu port */ +- mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); ++ /* Unknown multicast frame forwarding to the cpu port */ ++ mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); + + /* Set CPU port number */ + if (priv->id == ID_MT7621) +@@ -1246,8 +1243,6 @@ mt7530_setup(struct dsa_switch *ds) + /* Enable and reset MIB counters */ + mt7530_mib_reset(ds); + +- mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); +- + for (i = 0; i < MT7530_NUM_PORTS; i++) { + /* Disable forwarding by default on all ports */ + mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, +diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h +index 756140b7dfd5..0e7e36d8f994 100644 +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -31,6 +31,7 @@ enum { + #define MT7530_MFC 0x10 + #define BC_FFP(x) (((x) & 0xff) << 24) + #define UNM_FFP(x) (((x) & 0xff) << 16) ++#define UNM_FFP_MASK UNM_FFP(~0) + #define UNU_FFP(x) (((x) & 0xff) << 8) + #define UNU_FFP_MASK UNU_FFP(~0) + #define CPU_EN BIT(7) +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index edf8452a2574..63ee0c49be7c 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -9285,7 +9285,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, + bnxt_free_skbs(bp); + + /* Save ring stats before shutdown */ +- if (bp->bnapi) ++ if (bp->bnapi && irq_re_init) + bnxt_get_ring_stats(bp, &bp->net_stats_prev); + if (irq_re_init) { + bnxt_free_irq(bp); +diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig +index 6a7e8993119f..941c7e667afc 100644 +--- a/drivers/net/ethernet/freescale/Kconfig ++++ b/drivers/net/ethernet/freescale/Kconfig +@@ -77,6 +77,7 @@ config UCC_GETH + depends on 
QUICC_ENGINE + select FSL_PQ_MDIO + select PHYLIB ++ select FIXED_PHY + ---help--- + This driver supports the Gigabit Ethernet mode of the QUICC Engine, + which is available on some Freescale SOCs. +@@ -90,6 +91,7 @@ config GIANFAR + depends on HAS_DMA + select FSL_PQ_MDIO + select PHYLIB ++ select FIXED_PHY + select CRC32 + ---help--- + This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, +diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig +index 3b325733a4f8..0a54c7e0e4ae 100644 +--- a/drivers/net/ethernet/freescale/dpaa/Kconfig ++++ b/drivers/net/ethernet/freescale/dpaa/Kconfig +@@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH + tristate "DPAA Ethernet" + depends on FSL_DPAA && FSL_FMAN + select PHYLIB ++ select FIXED_PHY + select FSL_FMAN_MAC + ---help--- + Data Path Acceleration Architecture Ethernet driver, +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +index 00c4beb760c3..cd9d08695cc1 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +@@ -2802,7 +2802,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) + } + + /* Do this here, so we can be verbose early */ +- SET_NETDEV_DEV(net_dev, dev); ++ SET_NETDEV_DEV(net_dev, dev->parent); + dev_set_drvdata(dev, net_dev); + + priv = netdev_priv(net_dev); +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +index 4344a59c823f..6122057d60c0 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +@@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); +- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); ++ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c +index 6e501af0e532..f6ff9620a137 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.c ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c +@@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work) + if (err) { + mlx4_err(dev, "Failed to retrieve required operation: %d\n", + err); +- return; ++ goto out; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 08048a2d7259..b6a3370068f1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -861,6 +861,7 @@ static void cmd_work_handler(struct work_struct *work) + int alloc_ret; + int cmd_mode; + ++ complete(&ent->handling); + sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { +@@ -978,6 +979,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + struct mlx5_cmd *cmd = &dev->cmd; + int err; + ++ if (!wait_for_completion_timeout(&ent->handling, timeout) && ++ cancel_work_sync(&ent->work)) { ++ ent->ret = -ECANCELED; ++ goto out_err; ++ } + if (cmd->mode == CMD_MODE_POLLING || ent->polling) { + wait_for_completion(&ent->done); + } else if (!wait_for_completion_timeout(&ent->done, timeout)) { +@@ -985,12 +991,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + } + ++out_err: + err = ent->ret; + + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); ++ } else if (err == -ECANCELED) { ++ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", ++ mlx5_command_str(msg_to_opcode(ent->in)), ++ msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", + err, deliv_status_to_str(ent->status), ent->status); +@@ -1026,6 +1037,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + ent->token = token; + ent->polling = force_polling; + ++ init_completion(&ent->handling); + if (!callback) + init_completion(&ent->done); + +@@ -1045,6 +1057,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; ++ if (err == -ECANCELED) ++ goto out_free; + + ds = ent->ts2 - ent->ts1; + op = MLX5_GET(mbox_in, in->first.data, opcode); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 38aa55638bbe..98304c42e495 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1103,7 +1103,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); + int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); + + int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); + + int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); + void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +index 46725cd743a3..7d1985fa0d4f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev, + struct mlx5e_ktls_offload_context_tx *tx_priv = + mlx5e_get_ktls_tx_priv_ctx(tls_ctx); + +- mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); + mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); ++ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); + kvfree(tx_priv); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 0e340893ca00..c133beb6a7a5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2758,7 +2758,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) + mlx5_core_modify_tir(mdev, 
priv->indir_tir[tt].tirn, in, inlen); + } + +- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) ++ /* Verify inner tirs resources allocated */ ++ if (!priv->inner_indir_tir[0].tirn) + return; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { +@@ -3405,14 +3406,15 @@ out: + return err; + } + +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) + { + int i; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); + +- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) ++ /* Verify inner tirs resources allocated */ ++ if (!priv->inner_indir_tir[0].tirn) + return; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) +@@ -5119,7 +5121,7 @@ err_destroy_xsk_rqts: + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -5138,7 +5140,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index fddf644ba349..9b232ef36d53 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1597,7 +1597,7 @@ err_destroy_ttc_table: + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, false); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -1614,7 +1614,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) + mlx5_del_flow_rules(rpriv->vport_rx_rule); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, false); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index dee12f17f9c2..d9e0fc146741 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -537,10 +537,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) + { + struct mlx5e_tx_wqe_info *wi; ++ u32 dma_fifo_cc, nbytes = 0; ++ u16 ci, sqcc, npkts = 0; + struct sk_buff *skb; +- u32 dma_fifo_cc; +- u16 sqcc; +- u16 ci; + int i; + + sqcc = sq->cc; +@@ -565,11 +564,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) + } + + dev_kfree_skb_any(skb); ++ npkts++; ++ nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + } + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; ++ ++ netdev_tx_completed_queue(sq->txq, npkts, nbytes); + } + + #ifdef CONFIG_MLX5_CORE_IPOIB +diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c +index 8bcf3426b9c6..3ce17c3d7a00 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c +@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev) + events->dev = dev; + dev->priv.events = events; + events->wq = create_singlethread_workqueue("mlx5_events"); +- if (!events->wq) ++ if (!events->wq) { ++ kfree(events); + return -ENOMEM; ++ } + INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); + + return 0; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 86e6bbb57482..8d9aab45fd8e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -417,6 +417,12 @@ static void del_sw_ns(struct fs_node *node) + + static void del_sw_prio(struct fs_node *node) + { ++ struct mlx5_flow_root_namespace *root_ns; ++ struct mlx5_flow_namespace *ns; ++ ++ fs_get_obj(ns, node); ++ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); ++ mutex_destroy(&root_ns->chain_lock); + kfree(node); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +index 3ed8ab2d703d..0fed2419623d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) + { + mlx5i_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index c96a0e501007..7dcdda9ca351 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1183,7 +1183,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + + err = mlx5_function_setup(dev, boot); + if (err) +- goto out; ++ goto err_function; + + if (boot) { + err = mlx5_init_once(dev); +@@ -1229,6 +1229,7 @@ err_load: + mlx5_cleanup_once(dev); + function_teardown: + mlx5_function_teardown(dev, boot); ++err_function: + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + mutex_unlock(&dev->intf_state_mutex); + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +index a806c6190bb1..6a432bb93dbb 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -3932,6 +3932,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) + mlxsw_sp_cpu_port_remove(mlxsw_sp); + kfree(mlxsw_sp->port_to_module); + kfree(mlxsw_sp->ports); ++ mlxsw_sp->ports = NULL; + } + + static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) +@@ -3986,6 +3987,7 @@ err_cpu_port_create: + 
kfree(mlxsw_sp->port_to_module); + err_port_to_module_alloc: + kfree(mlxsw_sp->ports); ++ mlxsw_sp->ports = NULL; + return err; + } + +@@ -4040,6 +4042,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, + } + } + ++static struct mlxsw_sp_port * ++mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) ++{ ++ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) ++ return mlxsw_sp->ports[local_port]; ++ return NULL; ++} ++ + static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, + unsigned int count, + struct netlink_ext_ack *extack) +@@ -4058,7 +4068,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, + local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); + local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); + +- mlxsw_sp_port = mlxsw_sp->ports[local_port]; ++ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); +@@ -4136,7 +4146,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, + local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); + local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); + +- mlxsw_sp_port = mlxsw_sp->ports[local_port]; ++ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); +diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +index 63e7a058b7c6..059cc1600890 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +@@ -1258,6 +1258,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) + if (mlxsw_sx_port_created(mlxsw_sx, i)) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); ++ mlxsw_sx->ports = NULL; + } + + static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) +@@ -1292,6 +1293,7 @@ err_port_module_info_get: + if (mlxsw_sx_port_created(mlxsw_sx, i)) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); ++ mlxsw_sx->ports = NULL; + return err; + } + +@@ -1375,6 +1377,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port, + u8 module, width; + int err; + ++ if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { ++ dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", ++ local_port); ++ return -EINVAL; ++ } ++ + if (new_type == DEVLINK_PORT_TYPE_AUTO) + return -EOPNOTSUPP; + +diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c +index 52c41d11f565..c3a6edc0ddf6 100644 +--- a/drivers/net/ethernet/microchip/encx24j600.c ++++ b/drivers/net/ethernet/microchip/encx24j600.c +@@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) + if (unlikely(ret)) { + netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", + ret); +- goto out_free; ++ goto out_stop; + } + + eidled = encx24j600_read_reg(priv, EIDLED); +@@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) + + out_unregister: + unregister_netdev(priv->ndev); ++out_stop: ++ kthread_stop(priv->kworker_task); + out_free: + free_netdev(ndev); + +@@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi) + struct encx24j600_priv *priv = 
dev_get_drvdata(&spi->dev); + + unregister_netdev(priv->ndev); ++ kthread_stop(priv->kworker_task); + + free_netdev(priv->ndev); + +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +index 2a533280b124..29b9c728a65e 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) + ahw->diag_cnt = 0; + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) +- goto fail_diag_irq; ++ goto fail_mbx_args; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[0].id; +@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) + + done: + qlcnic_free_mbx_args(&cmd); ++ ++fail_mbx_args: + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); + + fail_diag_irq: +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c +index c91876f8c536..6e78a33aa5e4 100644 +--- a/drivers/net/ethernet/sun/cassini.c ++++ b/drivers/net/ethernet/sun/cassini.c +@@ -4971,7 +4971,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + cas_cacheline_size)) { + dev_err(&pdev->dev, "Could not set PCI cache " + "line size\n"); +- goto err_write_cacheline; ++ goto err_out_free_res; + } + } + #endif +@@ -5144,7 +5144,6 @@ err_out_iounmap: + err_out_free_res: + pci_release_regions(pdev); + +-err_write_cacheline: + /* Try to restore it in case the error occurred after we + * set it. + */ +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c +index d7a953c647b4..39df8c8feb6c 100644 +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -2999,11 +2999,15 @@ static int cpsw_suspend(struct device *dev) + struct cpsw_common *cpsw = dev_get_drvdata(dev); + int i; + ++ rtnl_lock(); ++ + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev) + if (netif_running(cpsw->slaves[i].ndev)) + cpsw_ndo_stop(cpsw->slaves[i].ndev); + ++ rtnl_unlock(); ++ + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(dev); + +diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c +index fbea6f232819..e2ad3c2e8df5 100644 +--- a/drivers/net/hamradio/bpqether.c ++++ b/drivers/net/hamradio/bpqether.c +@@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) + { + struct bpqdev *bpq; + +- list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) { ++ list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list, ++ lockdep_rtnl_is_held()) { + if (bpq->ethdev == dev) + return bpq->axdev; + } +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c +index fe630438f67b..9bb37ac99a10 100644 +--- a/drivers/net/usb/cdc_ether.c ++++ b/drivers/net/usb/cdc_ether.c +@@ -808,14 +808,21 @@ static const struct usb_device_id products[] = { + .driver_info = 0, + }, + +-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ ++/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ + { + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, + }, + +- /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ ++/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ ++{ ++ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, 
USB_CDC_PROTO_NONE), ++ .driver_info = 0, ++}, ++ ++/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ + { + USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index 44ea5dcc43fd..cd1a07175e11 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -5837,6 +5837,7 @@ static const struct usb_device_id rtl8152_table[] = { + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, ++ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, +diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c +index 73a852b2f417..34eec26b0c1f 100644 +--- a/drivers/soc/mediatek/mtk-cmdq-helper.c ++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c +@@ -258,7 +258,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, + spin_unlock_irqrestore(&client->lock, flags); + } + +- mbox_send_message(client->chan, pkt); ++ err = mbox_send_message(client->chan, pkt); ++ if (err < 0) ++ return err; + /* We can send next packet immediately, so just call txdone. */ + mbox_client_txdone(client->chan, 0); + +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 7051611229c9..b67372737dc9 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = { + + static const struct property_entry dwc3_pci_mrfld_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "otg"), ++ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + {} + }; +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index b47938dff1a2..238f555fe494 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + + req->buf = dev->rbuf; + req->context = NULL; +- value = -EOPNOTSUPP; + switch (ctrl->bRequest) { + + case USB_REQ_GET_DESCRIPTOR: +@@ -1784,7 +1783,7 @@ static ssize_t + dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + { + struct dev_data *dev = fd->private_data; +- ssize_t value = len, length = len; ++ ssize_t value, length = len; + unsigned total; + u32 tag; + char *kbuf; +diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c +index bfebf1f2e991..9a7e655d5280 100644 +--- a/drivers/usb/phy/phy-twl6030-usb.c ++++ b/drivers/usb/phy/phy-twl6030-usb.c +@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) + if (status < 0) { + dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", + twl->irq1, status); +- return status; ++ goto err_put_regulator; + } + + status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq, +@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) + if (status < 0) { + dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", + twl->irq2, status); +- free_irq(twl->irq1, twl); +- return status; ++ goto err_free_irq1; + } + + twl->asleep = 0; +@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev) + 
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); + + return 0; ++ ++err_free_irq1: ++ free_irq(twl->irq1, twl); ++err_put_regulator: ++ regulator_put(twl->usb3v3); ++ ++ return status; + } + + static int twl6030_usb_remove(struct platform_device *pdev) +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index c5642bcb6b46..7ce3cfd965d2 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -1731,7 +1731,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, + (!regset->active || regset->active(t->task, regset) > 0)) { + int ret; + size_t size = regset_size(t->task, regset); +- void *data = kmalloc(size, GFP_KERNEL); ++ void *data = kzalloc(size, GFP_KERNEL); + if (unlikely(!data)) + return 0; + ret = regset->get(t->task, regset, +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index 44a3ce1e4ce4..ad057ed2b30b 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -60,9 +60,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, + object = container_of(op->op.object, struct cachefiles_object, fscache); + spin_lock(&object->work_lock); + list_add_tail(&monitor->op_link, &op->to_do); ++ fscache_enqueue_retrieval(op); + spin_unlock(&object->work_lock); + +- fscache_enqueue_retrieval(op); + fscache_put_retrieval(op); + return 0; + } +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 2d602c2b0ff6..b2695919435e 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -3938,7 +3938,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, + __ceph_queue_cap_release(session, cap); + spin_unlock(&session->s_cap_lock); + } +- goto done; ++ goto flush_cap_releases; + } + + /* these will work even if we don't have a cap yet */ +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index b095094c0842..4959dbe740f7 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -3997,7 +3997,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) + * than it negotiated since it will refuse the read + * then. 
+ */ +- if ((tcon->ses) && !(tcon->ses->capabilities & ++ if (!(tcon->ses->capabilities & + tcon->ses->server->vals->cap_large_files)) { + current_read_size = min_t(uint, + current_read_size, CIFSMaxBufSize); +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c +index 47bc27d4169e..110e5c4db819 100644 +--- a/fs/gfs2/log.c ++++ b/fs/gfs2/log.c +@@ -598,13 +598,13 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) + struct buffer_head *bh = bd->bd_bh; + struct gfs2_glock *gl = bd->bd_gl; + ++ sdp->sd_log_num_revoke++; ++ if (atomic_inc_return(&gl->gl_revokes) == 1) ++ gfs2_glock_hold(gl); + bh->b_private = NULL; + bd->bd_blkno = bh->b_blocknr; + gfs2_remove_from_ail(bd); /* drops ref on bh */ + bd->bd_bh = NULL; +- sdp->sd_log_num_revoke++; +- if (atomic_inc_return(&gl->gl_revokes) == 1) +- gfs2_glock_hold(gl); + set_bit(GLF_LFLUSH, &gl->gl_flags); + list_add(&bd->bd_list, &sdp->sd_log_revokes); + } +diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c +index 7c016a082aa6..cbee745169b8 100644 +--- a/fs/gfs2/quota.c ++++ b/fs/gfs2/quota.c +@@ -1040,8 +1040,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) + u32 x; + int error = 0; + +- if (capable(CAP_SYS_RESOURCE) || +- sdp->sd_args.ar_quota != GFS2_QUOTA_ON) ++ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) + return 0; + + error = gfs2_quota_hold(ip, uid, gid); +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h +index 765627d9a91e..fe68a91dc16f 100644 +--- a/fs/gfs2/quota.h ++++ b/fs/gfs2/quota.h +@@ -44,7 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, + int ret; + + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ +- if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) ++ if (capable(CAP_SYS_RESOURCE) || ++ sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + return 0; + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); + if (ret) +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index 238873739550..5aa8705df87e 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -48,7 +48,7 @@ + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) + #else +- #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #define cpumask_of_node(node) ((void)(node), cpu_online_mask) + #endif + #endif + #ifndef pcibus_to_node +diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h +index 7f3486e32e5d..624d2643bfba 100644 +--- a/include/linux/ieee80211.h ++++ b/include/linux/ieee80211.h +@@ -2047,7 +2047,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) + } + + /* HE Operation defines */ +-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 ++#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 + #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 + #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 + #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 3e80f03a387f..897829651204 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -756,6 +756,7 @@ struct mlx5_cmd_work_ent { + struct delayed_work cb_timeout_work; + void *context; + int idx; ++ struct completion handling; + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; +diff --git a/include/linux/mm.h b/include/linux/mm.h +index afa77b683a04..53bad834adf5 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -695,6 +695,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) + + extern void kvfree(const void *addr); + ++/* ++ * Mapcount of compound page as a whole, does not include mapped sub-pages. ++ * ++ * Must be called only for compound pages or any their tail sub-pages. ++ */ + static inline int compound_mapcount(struct page *page) + { + VM_BUG_ON_PAGE(!PageCompound(page), page); +@@ -714,10 +719,16 @@ static inline void page_mapcount_reset(struct page *page) + + int __page_mapcount(struct page *page); + ++/* ++ * Mapcount of 0-order page; when compound sub-page, includes ++ * compound_mapcount(). ++ * ++ * Result is undefined for pages which cannot be mapped into userspace. ++ * For example SLAB or special types of pages. See function page_has_type(). ++ * They use this place in struct page differently. ++ */ + static inline int page_mapcount(struct page *page) + { +- VM_BUG_ON_PAGE(PageSlab(page), page); +- + if (unlikely(PageCompound(page))) + return __page_mapcount(page); + return atomic_read(&page->_mapcount) + 1; +diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h +index fcc409de31a4..a28aa289afdc 100644 +--- a/include/linux/netfilter/nf_conntrack_pptp.h ++++ b/include/linux/netfilter/nf_conntrack_pptp.h +@@ -10,7 +10,7 @@ + #include + #include + +-extern const char *const pptp_msg_name[]; ++const char *pptp_msg_name(u_int16_t msg); + + /* state of the control session */ + enum pptp_ctrlsess_state { +diff --git a/include/net/act_api.h b/include/net/act_api.h +index b18c699681ca..59d05feecfb8 100644 +--- a/include/net/act_api.h ++++ b/include/net/act_api.h +@@ -69,7 +69,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) + { + dtm->install = jiffies_to_clock_t(jiffies - stm->install); + dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); +- dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); ++ dtm->firstuse = stm->firstuse ? 
++ jiffies_to_clock_t(jiffies - stm->firstuse) : 0; + dtm->expires = jiffies_to_clock_t(stm->expires); + } + +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index ab1ca9e238d2..ffbae7683450 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -244,7 +244,6 @@ struct fib_dump_filter { + u32 table_id; + /* filter_set is an optimization that an entry is set */ + bool filter_set; +- bool dump_all_families; + bool dump_routes; + bool dump_exceptions; + unsigned char protocol; +@@ -423,6 +422,16 @@ static inline int fib_num_tclassid_users(struct net *net) + #endif + int fib_unmerge(struct net *net); + ++static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, ++const struct net_device *dev) ++{ ++ if (nhc->nhc_dev == dev || ++ l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) ++ return true; ++ ++ return false; ++} ++ + /* Exported by fib_semantics.c */ + int ip_fib_check_default(__be32 gw, struct net_device *dev); + int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); +diff --git a/include/net/nexthop.h b/include/net/nexthop.h +index 331ebbc94fe7..3bb618e5ecf7 100644 +--- a/include/net/nexthop.h ++++ b/include/net/nexthop.h +@@ -70,6 +70,7 @@ struct nh_grp_entry { + }; + + struct nh_group { ++ struct nh_group *spare; /* spare group for removals */ + u16 num_nh; + bool mpath; + bool has_v4; +@@ -136,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh) + { + unsigned int rc = 1; + +- if (nexthop_is_multipath(nh)) { ++ if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); +- rc = nh_grp->num_nh; ++ if (nh_grp->mpath) ++ rc = nh_grp->num_nh; + } + + return rc; + } + + static inline +-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) ++struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) + { +- const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); +- + /* for_nexthops macros in fib_semantics.c grabs a pointer to + * the nexthop before checking nhsel + */ +@@ -185,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh) + { + const struct nh_info *nhi; + +- if (nexthop_is_multipath(nh)) { +- if (nexthop_num_path(nh) > 1) +- return false; +- nh = nexthop_mpath_select(nh, 0); +- if (!nh) ++ if (nh->is_group) { ++ struct nh_group *nh_grp; ++ ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp); ++ if (nh_grp->num_nh > 1) + return false; ++ ++ nh = nh_grp->nh_entries[0].nh; + } + + nhi = rcu_dereference_rtnl(nh->nh_info); +@@ -216,16 +218,46 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) + BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); + BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); + +- if (nexthop_is_multipath(nh)) { +- nh = nexthop_mpath_select(nh, nhsel); +- if (!nh) +- return NULL; ++ if (nh->is_group) { ++ struct nh_group *nh_grp; ++ ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp); ++ if (nh_grp->mpath) { ++ nh = nexthop_mpath_select(nh_grp, nhsel); ++ if (!nh) ++ return NULL; ++ } + } + + nhi = rcu_dereference_rtnl(nh->nh_info); + return &nhi->fib_nhc; + } + ++static inline bool nexthop_uses_dev(const struct nexthop *nh, ++ const struct net_device *dev) ++{ ++ struct nh_info *nhi; ++ ++ if (nh->is_group) { ++ struct nh_group *nhg = rcu_dereference(nh->nh_grp); ++ int i; ++ ++ for (i = 0; i < nhg->num_nh; i++) { ++ struct nexthop *nhe = nhg->nh_entries[i].nh; ++ ++ nhi = rcu_dereference(nhe->nh_info); ++ if 
(nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) ++ return true; ++ } ++ } else { ++ nhi = rcu_dereference(nh->nh_info); ++ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) ++ return true; ++ } ++ ++ return false; ++} ++ + static inline unsigned int fib_info_num_path(const struct fib_info *fi) + { + if (unlikely(fi->nh)) +@@ -263,8 +295,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) + { + struct nh_info *nhi; + +- if (nexthop_is_multipath(nh)) { +- nh = nexthop_mpath_select(nh, 0); ++ if (nh->is_group) { ++ struct nh_group *nh_grp; ++ ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp); ++ nh = nexthop_mpath_select(nh_grp, 0); + if (!nh) + return NULL; + } +diff --git a/include/net/tls.h b/include/net/tls.h +index 093abb5a3dff..db26e3ec918f 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -157,6 +157,8 @@ struct tls_sw_context_tx { + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; ++ /* protect crypto_wait with encrypt_pending */ ++ spinlock_t encrypt_compl_lock; + int async_notify; + int async_capable; + +@@ -177,6 +179,8 @@ struct tls_sw_context_rx { + int async_capable; + bool decrypted; + atomic_t decrypt_pending; ++ /* protect crypto_wait with decrypt_pending*/ ++ spinlock_t decrypt_compl_lock; + bool async_notify; + }; + +diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h +index 05eabfd5d0d3..9f382e7d4579 100644 +--- a/include/rdma/uverbs_std_types.h ++++ b/include/rdma/uverbs_std_types.h +@@ -88,7 +88,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, + + static inline void uobj_put_destroy(struct ib_uobject *uobj) + { +- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); ++ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); + } + + static inline void uobj_put_read(struct ib_uobject *uobj) +diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h +index 5f3b9fec7b5f..ff7cfdc6cb44 100644 +--- a/include/uapi/linux/xfrm.h ++++ b/include/uapi/linux/xfrm.h +@@ -304,7 +304,7 @@ enum xfrm_attr_type_t { + XFRMA_PROTO, /* __u8 */ + XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ + XFRMA_PAD, +- XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ ++ XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ + XFRMA_SET_MARK, /* __u32 */ + XFRMA_SET_MARK_MASK, /* __u32 */ + XFRMA_IF_ID, /* __u32 */ +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index a8a57bebb5fa..f765475be359 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -1655,6 +1655,7 @@ static void collapse_file(struct mm_struct *mm, + if (page_has_private(page) && + !try_to_release_page(page, GFP_KERNEL)) { + result = SCAN_PAGE_HAS_PRIVATE; ++ putback_lru_page(page); + goto out_unlock; + } + +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c +index bb222b882b67..e5a3dc28116d 100644 +--- a/net/ax25/af_ax25.c ++++ b/net/ax25/af_ax25.c +@@ -635,8 +635,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, + break; + + case SO_BINDTODEVICE: +- if (optlen > IFNAMSIZ) +- optlen = IFNAMSIZ; ++ if (optlen > IFNAMSIZ - 1) ++ optlen = IFNAMSIZ - 1; ++ ++ memset(devname, 0, sizeof(devname)); + + if (copy_from_user(devname, optval, optlen)) { + res = -EFAULT; +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c +index b325b569e761..f48cf4cfb80f 100644 +--- a/net/bridge/netfilter/nft_reject_bridge.c ++++ b/net/bridge/netfilter/nft_reject_bridge.c +@@ -31,6 +31,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, + 
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); + eth->h_proto = eth_hdr(oldskb)->h_proto; + skb_pull(nskb, ETH_HLEN); ++ ++ if (skb_vlan_tag_present(oldskb)) { ++ u16 vid = skb_vlan_tag_get(oldskb); ++ ++ __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); ++ } + } + + static int nft_bridge_iphdr_validate(struct sk_buff *skb) +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index 2352afa62d1f..e513b8876d13 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -3652,7 +3652,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) + * supported. + */ + req->r_t.target_oloc.pool = m.redirect.oloc.pool; +- req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; ++ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | ++ CEPH_OSD_FLAG_IGNORE_OVERLAY | ++ CEPH_OSD_FLAG_IGNORE_CACHE; + req->r_tid = 0; + __submit_request(req, false); + goto out_unlock_osdc; +diff --git a/net/core/dev.c b/net/core/dev.c +index 120b994af31c..8552874e5aac 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4713,11 +4713,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, + return 0; + } + +-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, ++static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, + struct packet_type **ppt_prev) + { + struct packet_type *ptype, *pt_prev; + rx_handler_func_t *rx_handler; ++ struct sk_buff *skb = *pskb; + struct net_device *orig_dev; + bool deliver_exact = false; + int ret = NET_RX_DROP; +@@ -4748,8 +4749,10 @@ another_round: + ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); + preempt_enable(); + +- if (ret2 != XDP_PASS) +- return NET_RX_DROP; ++ if (ret2 != XDP_PASS) { ++ ret = NET_RX_DROP; ++ goto out; ++ } + skb_reset_mac_len(skb); + } + +@@ -4899,6 +4902,13 @@ drop: + } + + out: ++ /* The invariant here is that if *ppt_prev is not NULL ++ * then skb should also be non-NULL. ++ * ++ * Apparently *ppt_prev assignment above holds this invariant due to ++ * skb dereferencing near it. 
++ */ ++ *pskb = skb; + return ret; + } + +@@ -4908,7 +4918,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) + struct packet_type *pt_prev = NULL; + int ret; + +- ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); ++ ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); + if (pt_prev) + ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, + skb->dev, pt_prev, orig_dev); +@@ -4986,7 +4996,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo + struct packet_type *pt_prev = NULL; + + skb_list_del_init(skb); +- __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); ++ __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); + if (!pt_prev) + continue; + if (pt_curr != pt_prev || od_curr != orig_dev) { +diff --git a/net/dsa/slave.c b/net/dsa/slave.c +index 23c2210fa7ec..f734ce0bcb56 100644 +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1409,6 +1409,7 @@ int dsa_slave_create(struct dsa_port *port) + if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) + slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + slave_dev->hw_features |= NETIF_F_HW_TC; ++ slave_dev->features |= NETIF_F_LLTX; + slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; + if (!IS_ERR_OR_NULL(port->mac)) + ether_addr_copy(slave_dev->dev_addr, port->mac); +diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c +index b5705cba8318..d6619edd53e5 100644 +--- a/net/dsa/tag_mtk.c ++++ b/net/dsa/tag_mtk.c +@@ -15,6 +15,7 @@ + #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 + #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) + #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) ++#define MTK_HDR_XMIT_SA_DIS BIT(6) + + static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +@@ -22,6 +23,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, + struct dsa_port *dp = dsa_slave_to_port(dev); + u8 *mtk_tag; + bool is_vlan_skb = true; ++ unsigned char *dest = eth_hdr(skb)->h_dest; ++ bool is_multicast_skb = is_multicast_ether_addr(dest) && ++ !is_broadcast_ether_addr(dest); + + /* Build the special tag after the MAC Source Address. 
If VLAN header + * is present, it's required that VLAN header and special tag is +@@ -47,6 +51,10 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, + MTK_HDR_XMIT_UNTAGGED; + mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; + ++ /* Disable SA learning for multicast frames */ ++ if (unlikely(is_multicast_skb)) ++ mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; ++ + /* Tag control information is kept for 802.1Q */ + if (!is_vlan_skb) { + mtk_tag[2] = 0; +@@ -61,6 +69,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, + { + int port; + __be16 *phdr, hdr; ++ unsigned char *dest = eth_hdr(skb)->h_dest; ++ bool is_multicast_skb = is_multicast_ether_addr(dest) && ++ !is_broadcast_ether_addr(dest); + + if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) + return NULL; +@@ -86,6 +97,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, + if (!skb->dev) + return NULL; + ++ /* Only unicast or broadcast frames are offloaded */ ++ if (likely(!is_multicast_skb)) ++ skb->offload_fwd_mark = 1; ++ + return skb; + } + +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c +index e2e219c7854a..25c8ba6732df 100644 +--- a/net/ipv4/esp4_offload.c ++++ b/net/ipv4/esp4_offload.c +@@ -63,10 +63,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head, + sp->olen++; + + xo = xfrm_offload(skb); +- if (!xo) { +- xfrm_state_put(x); ++ if (!xo) + goto out_reset; +- } + } + + xo->flags |= XFRM_GRO; +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 48bf3b9be475..2b0521feadaa 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -319,17 +319,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) + { + bool dev_match = false; + #ifdef CONFIG_IP_ROUTE_MULTIPATH +- int ret; ++ if (unlikely(fi->nh)) { ++ dev_match = nexthop_uses_dev(fi->nh, dev); ++ } else { ++ int ret; + +- for (ret = 0; ret < fib_info_num_path(fi); ret++) { +- const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); ++ for (ret = 0; ret < fib_info_num_path(fi); ret++) { ++ const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); + +- if (nhc->nhc_dev == dev) { +- dev_match = true; +- break; +- } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) { +- dev_match = true; +- break; ++ if (nhc_l3mdev_matches_dev(nhc, dev)) { ++ dev_match = true; ++ break; ++ } + } + } + #else +@@ -928,7 +929,6 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, + else + filter->dump_exceptions = false; + +- filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC); + filter->flags = rtm->rtm_flags; + filter->protocol = rtm->rtm_protocol; + filter->rt_type = rtm->rtm_type; +@@ -1000,7 +1000,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + if (filter.table_id) { + tb = fib_get_table(net, filter.table_id); + if (!tb) { +- if (filter.dump_all_families) ++ if (rtnl_msg_family(cb->nlh) != PF_INET) + return skb->len; + + NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist"); +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index b0010c710802..5e486895d67c 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -24,17 +24,19 @@ + #include + + #if IS_ENABLED(CONFIG_IPV6) +-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6 +- * only, and any IPv4 addresses if not IPv6 only +- * match_wildcard == false: addresses must be exactly the same, i.e. 
+- * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, +- * and 0.0.0.0 equals to 0.0.0.0 only ++/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses ++ * if IPv6 only, and any IPv4 addresses ++ * if not IPv6 only ++ * match_sk*_wildcard == false: addresses must be exactly the same, i.e. ++ * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, ++ * and 0.0.0.0 equals to 0.0.0.0 only + */ + static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, + const struct in6_addr *sk2_rcv_saddr6, + __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, + bool sk1_ipv6only, bool sk2_ipv6only, +- bool match_wildcard) ++ bool match_sk1_wildcard, ++ bool match_sk2_wildcard) + { + int addr_type = ipv6_addr_type(sk1_rcv_saddr6); + int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; +@@ -44,8 +46,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, + if (!sk2_ipv6only) { + if (sk1_rcv_saddr == sk2_rcv_saddr) + return true; +- if (!sk1_rcv_saddr || !sk2_rcv_saddr) +- return match_wildcard; ++ return (match_sk1_wildcard && !sk1_rcv_saddr) || ++ (match_sk2_wildcard && !sk2_rcv_saddr); + } + return false; + } +@@ -53,11 +55,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, + if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) + return true; + +- if (addr_type2 == IPV6_ADDR_ANY && match_wildcard && ++ if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard && + !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) + return true; + +- if (addr_type == IPV6_ADDR_ANY && match_wildcard && ++ if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard && + !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) + return true; + +@@ -69,18 +71,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, + } + #endif + +-/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses +- * match_wildcard == false: addresses must be exactly the same, i.e. +- * 0.0.0.0 only equals to 0.0.0.0 ++/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses ++ * match_sk*_wildcard == false: addresses must be exactly the same, i.e. 
++ * 0.0.0.0 only equals to 0.0.0.0 + */ + static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, +- bool sk2_ipv6only, bool match_wildcard) ++ bool sk2_ipv6only, bool match_sk1_wildcard, ++ bool match_sk2_wildcard) + { + if (!sk2_ipv6only) { + if (sk1_rcv_saddr == sk2_rcv_saddr) + return true; +- if (!sk1_rcv_saddr || !sk2_rcv_saddr) +- return match_wildcard; ++ return (match_sk1_wildcard && !sk1_rcv_saddr) || ++ (match_sk2_wildcard && !sk2_rcv_saddr); + } + return false; + } +@@ -96,10 +99,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + sk2->sk_rcv_saddr, + ipv6_only_sock(sk), + ipv6_only_sock(sk2), ++ match_wildcard, + match_wildcard); + #endif + return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, +- ipv6_only_sock(sk2), match_wildcard); ++ ipv6_only_sock(sk2), match_wildcard, ++ match_wildcard); + } + EXPORT_SYMBOL(inet_rcv_saddr_equal); + +@@ -273,10 +278,10 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, + tb->fast_rcv_saddr, + sk->sk_rcv_saddr, + tb->fast_ipv6_only, +- ipv6_only_sock(sk), true); ++ ipv6_only_sock(sk), true, false); + #endif + return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, +- ipv6_only_sock(sk), true); ++ ipv6_only_sock(sk), true, false); + } + + /* Obtain a reference to a local port for the given sock, +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index 8ecaf0f26973..bd41354ed8c1 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -93,7 +93,28 @@ static int vti_rcv_proto(struct sk_buff *skb) + + static int vti_rcv_tunnel(struct sk_buff *skb) + { +- return vti_rcv(skb, ip_hdr(skb)->saddr, true); ++ struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); ++ const struct iphdr *iph = ip_hdr(skb); ++ struct ip_tunnel *tunnel; ++ ++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, ++ iph->saddr, iph->daddr, 0); ++ if (tunnel) { ++ struct tnl_ptk_info tpi = { ++ .proto = htons(ETH_P_IP), ++ }; ++ ++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) ++ goto drop; ++ if (iptunnel_pull_header(skb, 0, tpi.proto, false)) ++ goto drop; ++ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); ++ } ++ ++ return -EINVAL; ++drop: ++ kfree_skb(skb); ++ return 0; + } + + static int vti_rcv_cb(struct sk_buff *skb, int err) +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c +index 2f01cf6fa0de..678575adaf3b 100644 +--- a/net/ipv4/ipip.c ++++ b/net/ipv4/ipip.c +@@ -698,7 +698,7 @@ out: + + rtnl_link_failed: + #if IS_ENABLED(CONFIG_MPLS) +- xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); ++ xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); + xfrm_tunnel_mplsip_failed: + + #endif +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c +index 58007439cffd..d71935618871 100644 +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -2609,7 +2609,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) + + mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); + if (!mrt) { +- if (filter.dump_all_families) ++ if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) + return skb->len; + + NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist"); +diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c +index b2aeb7bf5dac..2a1e10f4ae93 100644 +--- a/net/ipv4/netfilter/nf_nat_pptp.c ++++ b/net/ipv4/netfilter/nf_nat_pptp.c +@@ -166,8 +166,7 @@ pptp_outbound_pkt(struct sk_buff *skb, + break; + default: + pr_debug("unknown outbound packet 0x%04x:%s\n", msg, +- msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : +- pptp_msg_name[0]); ++ pptp_msg_name(msg)); + /* fall through */ + case PPTP_SET_LINK_INFO: + /* only need to NAT in case PAC is behind NAT box */ +@@ -268,9 +267,7 @@ pptp_inbound_pkt(struct sk_buff *skb, + pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); + break; + default: +- pr_debug("unknown inbound packet %s\n", +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : +- pptp_msg_name[0]); ++ pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); + /* fall through */ + case PPTP_START_SESSION_REQUEST: + case PPTP_START_SESSION_REPLY: +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index 3737d32ad11a..a01f500d6a6b 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -64,9 +64,16 @@ static void nexthop_free_mpath(struct nexthop *nh) + int i; + + nhg = rcu_dereference_raw(nh->nh_grp); +- for (i = 0; i < nhg->num_nh; ++i) +- WARN_ON(nhg->nh_entries[i].nh); ++ for (i = 0; i < nhg->num_nh; ++i) { ++ struct nh_grp_entry *nhge = &nhg->nh_entries[i]; ++ ++ WARN_ON(!list_empty(&nhge->nh_list)); ++ nexthop_put(nhge->nh); ++ } ++ ++ WARN_ON(nhg->spare == nhg); + ++ kfree(nhg->spare); + kfree(nhg); + } + +@@ -277,6 +284,7 @@ out: + return 0; + + nla_put_failure: ++ nlmsg_cancel(skb, nlh); + return -EMSGSIZE; + } + +@@ -434,7 +442,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[], + if (!valid_group_nh(nh, len, extack)) + return -EINVAL; + } +- for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) { ++ for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) { + if (!tb[i]) + continue; + +@@ -694,41 +702,56 @@ static void nh_group_rebalance(struct nh_group *nhg) + } + } + +-static void remove_nh_grp_entry(struct nh_grp_entry *nhge, +- struct nh_group *nhg, ++static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, + struct nl_info *nlinfo) + { ++ struct nh_grp_entry *nhges, *new_nhges; ++ struct nexthop *nhp = nhge->nh_parent; + struct nexthop *nh = nhge->nh; +- struct nh_grp_entry *nhges; +- bool found = false; +- int i; ++ struct nh_group *nhg, *newg; ++ int i, j; + + WARN_ON(!nh); + +- nhges = nhg->nh_entries; +- for (i = 0; i < nhg->num_nh; ++i) { +- if (found) { +- nhges[i-1].nh = nhges[i].nh; +- nhges[i-1].weight = nhges[i].weight; +- list_del(&nhges[i].nh_list); +- list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list); +- } else if (nhg->nh_entries[i].nh == nh) { +- found = true; +- } +- } ++ nhg = rtnl_dereference(nhp->nh_grp); ++ newg = nhg->spare; + +- if (WARN_ON(!found)) ++ /* last entry, keep it visible and remove the parent */ ++ if (nhg->num_nh == 1) { ++ remove_nexthop(net, nhp, nlinfo); + return; ++ } ++ ++ newg->has_v4 = nhg->has_v4; ++ newg->mpath = nhg->mpath; ++ newg->num_nh = nhg->num_nh; + +- nhg->num_nh--; +- nhg->nh_entries[nhg->num_nh].nh = NULL; ++ /* copy old entries to new except the one getting removed */ ++ nhges = nhg->nh_entries; ++ new_nhges = newg->nh_entries; ++ for (i = 0, j = 0; i < nhg->num_nh; ++i) { ++ /* current nexthop getting removed */ ++ if (nhg->nh_entries[i].nh == nh) { ++ newg->num_nh--; ++ continue; ++ } + +- nh_group_rebalance(nhg); ++ list_del(&nhges[i].nh_list); ++ new_nhges[j].nh_parent = nhges[i].nh_parent; ++ new_nhges[j].nh = nhges[i].nh; ++ new_nhges[j].weight = nhges[i].weight; ++ list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); ++ j++; ++ } + +- nexthop_put(nh); ++ nh_group_rebalance(newg); ++ rcu_assign_pointer(nhp->nh_grp, newg); ++ ++ list_del(&nhge->nh_list); ++ nexthop_put(nhge->nh); + + if (nlinfo) +- nexthop_notify(RTM_NEWNEXTHOP, 
nhge->nh_parent, nlinfo); ++ nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); + } + + static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, +@@ -736,17 +759,11 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, + { + struct nh_grp_entry *nhge, *tmp; + +- list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) { +- struct nh_group *nhg; +- +- list_del(&nhge->nh_list); +- nhg = rtnl_dereference(nhge->nh_parent->nh_grp); +- remove_nh_grp_entry(nhge, nhg, nlinfo); ++ list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) ++ remove_nh_grp_entry(net, nhge, nlinfo); + +- /* if this group has no more entries then remove it */ +- if (!nhg->num_nh) +- remove_nexthop(net, nhge->nh_parent, nlinfo); +- } ++ /* make sure all see the newly published array before releasing rtnl */ ++ synchronize_rcu(); + } + + static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) +@@ -760,10 +777,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) + if (WARN_ON(!nhge->nh)) + continue; + +- list_del(&nhge->nh_list); +- nexthop_put(nhge->nh); +- nhge->nh = NULL; +- nhg->num_nh--; ++ list_del_init(&nhge->nh_list); + } + } + +@@ -1086,6 +1100,7 @@ static struct nexthop *nexthop_create_group(struct net *net, + { + struct nlattr *grps_attr = cfg->nh_grp; + struct nexthop_grp *entry = nla_data(grps_attr); ++ u16 num_nh = nla_len(grps_attr) / sizeof(*entry); + struct nh_group *nhg; + struct nexthop *nh; + int i; +@@ -1096,12 +1111,21 @@ static struct nexthop *nexthop_create_group(struct net *net, + + nh->is_group = 1; + +- nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry)); ++ nhg = nexthop_grp_alloc(num_nh); + if (!nhg) { + kfree(nh); + return ERR_PTR(-ENOMEM); + } + ++ /* spare group used for removals */ ++ nhg->spare = nexthop_grp_alloc(num_nh); ++ if (!nhg->spare) { ++ kfree(nhg); ++ kfree(nh); ++ return NULL; ++ } ++ nhg->spare->spare = nhg; ++ + for (i = 0; i < nhg->num_nh; ++i) { + struct nexthop *nhe; + struct nh_info *nhi; +@@ -1133,6 +1157,7 @@ out_no_nh: + for (; i >= 0; --i) + nexthop_put(nhg->nh_entries[i].nh); + ++ kfree(nhg->spare); + kfree(nhg); + kfree(nh); + +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 558ddf7ab395..b3a8d32f7d8d 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -490,18 +490,16 @@ u32 ip_idents_reserve(u32 hash, int segs) + atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; + u32 old = READ_ONCE(*p_tstamp); + u32 now = (u32)jiffies; +- u32 new, delta = 0; ++ u32 delta = 0; + + if (old != now && cmpxchg(p_tstamp, old, now) == old) + delta = prandom_u32_max(now - old); + +- /* Do not use atomic_add_return() as it makes UBSAN unhappy */ +- do { +- old = (u32)atomic_read(p_id); +- new = old + delta + segs; +- } while (atomic_cmpxchg(p_id, old, new) != old); +- +- return new - segs; ++ /* If UBSAN reports an error there, please make sure your compiler ++ * supports -fno-strict-overflow before reporting it that was a bug ++ * in UBSAN, and it has been fixed in GCC-8. 
++ */ ++ return atomic_add_return(segs + delta, p_id) - segs; + } + EXPORT_SYMBOL(ip_idents_reserve); + +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c +index fd535053245b..93e086cf058a 100644 +--- a/net/ipv6/esp6_offload.c ++++ b/net/ipv6/esp6_offload.c +@@ -85,10 +85,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head, + sp->olen++; + + xo = xfrm_offload(skb); +- if (!xo) { +- xfrm_state_put(x); ++ if (!xo) + goto out_reset; +- } + } + + xo->flags |= XFRM_GRO; +@@ -123,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) + struct ip_esp_hdr *esph; + struct ipv6hdr *iph = ipv6_hdr(skb); + struct xfrm_offload *xo = xfrm_offload(skb); +- int proto = iph->nexthdr; ++ u8 proto = iph->nexthdr; + + skb_push(skb, -skb_network_offset(skb)); ++ ++ if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) { ++ __be16 frag; ++ ++ ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); ++ } ++ + esph = ip_esp_hdr(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index c75274e0745c..7a0c877ca306 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -613,7 +613,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + if (arg.filter.table_id) { + tb = fib6_get_table(net, arg.filter.table_id); + if (!tb) { +- if (arg.filter.dump_all_families) ++ if (rtnl_msg_family(cb->nlh) != PF_INET6) + goto out; + + NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist"); +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 857a89ad4d6c..dd41313d7fa5 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -2498,7 +2498,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) + + mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); + if (!mrt) { +- if (filter.dump_all_families) ++ if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) + return skb->len; + + NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist"); +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c +index 38a0383dfbcf..aa5150929996 100644 +--- a/net/mac80211/mesh_hwmp.c ++++ b/net/mac80211/mesh_hwmp.c +@@ -1103,7 +1103,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) + mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, + target_flags, mpath->dst, mpath->sn, da, 0, + ttl, lifetime, 0, ifmsh->preq_id++, sdata); ++ ++ spin_lock_bh(&mpath->state_lock); ++ if (mpath->flags & MESH_PATH_DELETED) { ++ spin_unlock_bh(&mpath->state_lock); ++ goto enddiscovery; ++ } + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); ++ spin_unlock_bh(&mpath->state_lock); + + enddiscovery: + rcu_read_unlock(); +diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c +index 67ac50104e6f..63908123f7ba 100644 +--- a/net/netfilter/ipset/ip_set_list_set.c ++++ b/net/netfilter/ipset/ip_set_list_set.c +@@ -59,7 +59,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, + /* Don't lookup sub-counters at all */ + opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; + if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) +- opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; ++ opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; + list_for_each_entry_rcu(e, &map->members, list) { + ret = ip_set_test(e->id, skb, par, opt); + if (ret <= 0) +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index c2ad462f33f1..48db4aec02de 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ 
b/net/netfilter/nf_conntrack_core.c +@@ -1879,22 +1879,18 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) + nf_conntrack_get(skb_nfct(nskb)); + } + +-static int nf_conntrack_update(struct net *net, struct sk_buff *skb) ++static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, ++ struct nf_conn *ct, ++ enum ip_conntrack_info ctinfo) + { + struct nf_conntrack_tuple_hash *h; + struct nf_conntrack_tuple tuple; +- enum ip_conntrack_info ctinfo; + struct nf_nat_hook *nat_hook; + unsigned int status; +- struct nf_conn *ct; + int dataoff; + u16 l3num; + u8 l4num; + +- ct = nf_ct_get(skb, &ctinfo); +- if (!ct || nf_ct_is_confirmed(ct)) +- return 0; +- + l3num = nf_ct_l3num(ct); + + dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num); +@@ -1951,6 +1947,76 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) + return 0; + } + ++/* This packet is coming from userspace via nf_queue, complete the packet ++ * processing after the helper invocation in nf_confirm(). ++ */ ++static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, ++ enum ip_conntrack_info ctinfo) ++{ ++ const struct nf_conntrack_helper *helper; ++ const struct nf_conn_help *help; ++ int protoff; ++ ++ help = nfct_help(ct); ++ if (!help) ++ return 0; ++ ++ helper = rcu_dereference(help->helper); ++ if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) ++ return 0; ++ ++ switch (nf_ct_l3num(ct)) { ++ case NFPROTO_IPV4: ++ protoff = skb_network_offset(skb) + ip_hdrlen(skb); ++ break; ++#if IS_ENABLED(CONFIG_IPV6) ++ case NFPROTO_IPV6: { ++ __be16 frag_off; ++ u8 pnum; ++ ++ pnum = ipv6_hdr(skb)->nexthdr; ++ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, ++ &frag_off); ++ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) ++ return 0; ++ break; ++ } ++#endif ++ default: ++ return 0; ++ } ++ ++ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && ++ !nf_is_loopback_packet(skb)) { ++ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { ++ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); ++ return -1; ++ } ++ } ++ ++ /* We've seen it coming out the other side: confirm it */ ++ return nf_conntrack_confirm(skb) == NF_DROP ? 
- 1 : 0; ++} ++ ++static int nf_conntrack_update(struct net *net, struct sk_buff *skb) ++{ ++ enum ip_conntrack_info ctinfo; ++ struct nf_conn *ct; ++ int err; ++ ++ ct = nf_ct_get(skb, &ctinfo); ++ if (!ct) ++ return 0; ++ ++ if (!nf_ct_is_confirmed(ct)) { ++ err = __nf_conntrack_update(net, skb, ct, ctinfo); ++ if (err < 0) ++ return err; ++ } ++ ++ return nf_confirm_cthelper(skb, ct, ctinfo); ++} ++ + static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) + { +diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c +index a971183f11af..1f44d523b512 100644 +--- a/net/netfilter/nf_conntrack_pptp.c ++++ b/net/netfilter/nf_conntrack_pptp.c +@@ -72,24 +72,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); + + #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) + /* PptpControlMessageType names */ +-const char *const pptp_msg_name[] = { +- "UNKNOWN_MESSAGE", +- "START_SESSION_REQUEST", +- "START_SESSION_REPLY", +- "STOP_SESSION_REQUEST", +- "STOP_SESSION_REPLY", +- "ECHO_REQUEST", +- "ECHO_REPLY", +- "OUT_CALL_REQUEST", +- "OUT_CALL_REPLY", +- "IN_CALL_REQUEST", +- "IN_CALL_REPLY", +- "IN_CALL_CONNECT", +- "CALL_CLEAR_REQUEST", +- "CALL_DISCONNECT_NOTIFY", +- "WAN_ERROR_NOTIFY", +- "SET_LINK_INFO" ++static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { ++ [0] = "UNKNOWN_MESSAGE", ++ [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", ++ [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", ++ [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", ++ [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", ++ [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", ++ [PPTP_ECHO_REPLY] = "ECHO_REPLY", ++ [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", ++ [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", ++ [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", ++ [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", ++ [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", ++ [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", ++ [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", ++ [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", ++ [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" + }; ++ ++const char *pptp_msg_name(u_int16_t msg) ++{ ++ if (msg > PPTP_MSG_MAX) ++ return pptp_msg_name_array[0]; ++ ++ return pptp_msg_name_array[msg]; ++} + EXPORT_SYMBOL(pptp_msg_name); + #endif + +@@ -276,7 +284,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; + + msg = ntohs(ctlh->messageType); +- pr_debug("inbound control message %s\n", pptp_msg_name[msg]); ++ pr_debug("inbound control message %s\n", pptp_msg_name(msg)); + + switch (msg) { + case PPTP_START_SESSION_REPLY: +@@ -311,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + pcid = pptpReq->ocack.peersCallID; + if (info->pns_call_id != pcid) + goto invalid; +- pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], ++ pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), + ntohs(cid), ntohs(pcid)); + + if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { +@@ -328,7 +336,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + goto invalid; + + cid = pptpReq->icreq.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->cstate = PPTP_CALL_IN_REQ; + info->pac_call_id = cid; + break; +@@ -347,7 +355,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + if (info->pns_call_id != pcid) + goto invalid; + +- pr_debug("%s, PCID=%X\n", 
pptp_msg_name[msg], ntohs(pcid)); ++ pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); + info->cstate = PPTP_CALL_IN_CONF; + + /* we expect a GRE connection from PAC to PNS */ +@@ -357,7 +365,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + case PPTP_CALL_DISCONNECT_NOTIFY: + /* server confirms disconnect */ + cid = pptpReq->disc.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->cstate = PPTP_CALL_NONE; + + /* untrack this call id, unexpect GRE packets */ +@@ -384,7 +392,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, + invalid: + pr_debug("invalid %s: type=%d cid=%u pcid=%u " + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], ++ pptp_msg_name(msg), + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, + ntohs(info->pns_call_id), ntohs(info->pac_call_id)); + return NF_ACCEPT; +@@ -404,7 +412,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; + + msg = ntohs(ctlh->messageType); +- pr_debug("outbound control message %s\n", pptp_msg_name[msg]); ++ pr_debug("outbound control message %s\n", pptp_msg_name(msg)); + + switch (msg) { + case PPTP_START_SESSION_REQUEST: +@@ -426,7 +434,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + info->cstate = PPTP_CALL_OUT_REQ; + /* track PNS call id */ + cid = pptpReq->ocreq.callID; +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); + info->pns_call_id = cid; + break; + +@@ -440,7 +448,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + pcid = pptpReq->icack.peersCallID; + if (info->pac_call_id != pcid) + goto invalid; +- pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], ++ pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), + ntohs(cid), ntohs(pcid)); + + if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { +@@ -480,7 +488,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, + invalid: + pr_debug("invalid %s: type=%d cid=%u pcid=%u " + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", +- msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], ++ pptp_msg_name(msg), + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, + ntohs(info->pns_call_id), ntohs(info->pac_call_id)); + return NF_ACCEPT; +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c +index 60838d5fb8e0..81406b93f126 100644 +--- a/net/netfilter/nfnetlink_cthelper.c ++++ b/net/netfilter/nfnetlink_cthelper.c +@@ -103,7 +103,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) + if (help->helper->data_len == 0) + return -EINVAL; + +- nla_memcpy(help->data, nla_data(attr), sizeof(help->data)); ++ nla_memcpy(help->data, attr, sizeof(help->data)); + return 0; + } + +@@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[], + ret = -ENOMEM; + goto err2; + } ++ helper->data_len = size; + + helper->flags |= NF_CT_HELPER_F_USERSPACE; + memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple)); +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c +index 930f48a20546..2a4d50e04441 100644 +--- a/net/qrtr/qrtr.c ++++ b/net/qrtr/qrtr.c +@@ -711,7 +711,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, + } + mutex_unlock(&qrtr_node_lock); + +- qrtr_local_enqueue(node, skb, type, from, to); ++ qrtr_local_enqueue(NULL, skb, type, from, to); + + return 0; + } +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c +index b06cae508158..6927b658dad3 100644 +--- a/net/sctp/sm_sideeffect.c ++++ b/net/sctp/sm_sideeffect.c +@@ -1522,9 +1522,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, + timeout = asoc->timeouts[cmd->obj.to]; + BUG_ON(!timeout); + +- timer->expires = jiffies + timeout; +- sctp_association_hold(asoc); +- add_timer(timer); ++ /* ++ * SCTP has a hard time with timer starts. Because we process ++ * timer starts as side effects, it can be hard to tell if we ++ * have already started a timer or not, which leads to BUG ++ * halts when we call add_timer. So here, instead of just starting ++ * a timer, if the timer is already started, and just mod ++ * the timer with the shorter of the two expiration times ++ */ ++ if (!timer_pending(timer)) ++ sctp_association_hold(asoc); ++ timer_reduce(timer, jiffies + timeout); + break; + + case SCTP_CMD_TIMER_RESTART: +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 5625a9500f21..84138a07e936 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1856,12 +1856,13 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( + /* Update the content of current association. */ + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); +- if (sctp_state(asoc, SHUTDOWN_PENDING) && ++ if ((sctp_state(asoc, SHUTDOWN_PENDING) || ++ sctp_state(asoc, SHUTDOWN_SENT)) && + (sctp_sstate(asoc->base.sk, CLOSING) || + sock_flag(asoc->base.sk, SOCK_DEAD))) { +- /* if were currently in SHUTDOWN_PENDING, but the socket +- * has been closed by user, don't transition to ESTABLISHED. +- * Instead trigger SHUTDOWN bundled with COOKIE_ACK. ++ /* If the socket has been closed by user, don't ++ * transition to ESTABLISHED. Instead trigger SHUTDOWN ++ * bundled with COOKIE_ACK. 
+ */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, +diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c +index 186c78431217..8f0977a9d423 100644 +--- a/net/tipc/udp_media.c ++++ b/net/tipc/udp_media.c +@@ -161,9 +161,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, + struct udp_bearer *ub, struct udp_media_addr *src, + struct udp_media_addr *dst, struct dst_cache *cache) + { +- struct dst_entry *ndst = dst_cache_get(cache); ++ struct dst_entry *ndst; + int ttl, err = 0; + ++ local_bh_disable(); ++ ndst = dst_cache_get(cache); + if (dst->proto == htons(ETH_P_IP)) { + struct rtable *rt = (struct rtable *)ndst; + +@@ -210,9 +212,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, + src->port, dst->port, false); + #endif + } ++ local_bh_enable(); + return err; + + tx_error: ++ local_bh_enable(); + kfree_skb(skb); + return err; + } +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 5513a08a4308..fbf6a496ee8b 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -203,10 +203,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err) + + kfree(aead_req); + ++ spin_lock_bh(&ctx->decrypt_compl_lock); + pending = atomic_dec_return(&ctx->decrypt_pending); + +- if (!pending && READ_ONCE(ctx->async_notify)) ++ if (!pending && ctx->async_notify) + complete(&ctx->async_wait.completion); ++ spin_unlock_bh(&ctx->decrypt_compl_lock); + } + + static int tls_do_decryption(struct sock *sk, +@@ -464,10 +466,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err) + ready = true; + } + ++ spin_lock_bh(&ctx->encrypt_compl_lock); + pending = atomic_dec_return(&ctx->encrypt_pending); + +- if (!pending && READ_ONCE(ctx->async_notify)) ++ if (!pending && ctx->async_notify) + complete(&ctx->async_wait.completion); ++ spin_unlock_bh(&ctx->encrypt_compl_lock); + + if (!ready) + return; +@@ -777,7 +781,7 @@ static int tls_push_record(struct sock *sk, int flags, + + static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, + bool full_record, u8 record_type, +- size_t *copied, int flags) ++ ssize_t *copied, int flags) + { + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); +@@ -793,9 +797,10 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, + psock = sk_psock_get(sk); + if (!psock || !policy) { + err = tls_push_record(sk, flags, record_type); +- if (err && err != -EINPROGRESS) { ++ if (err && sk->sk_err == EBADMSG) { + *copied -= sk_msg_free(sk, msg); + tls_free_open_rec(sk); ++ err = -sk->sk_err; + } + if (psock) + sk_psock_put(sk, psock); +@@ -821,9 +826,10 @@ more_data: + switch (psock->eval) { + case __SK_PASS: + err = tls_push_record(sk, flags, record_type); +- if (err && err != -EINPROGRESS) { ++ if (err && sk->sk_err == EBADMSG) { + *copied -= sk_msg_free(sk, msg); + tls_free_open_rec(sk); ++ err = -sk->sk_err; + goto out_err; + } + break; +@@ -913,7 +919,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + unsigned char record_type = TLS_RECORD_TYPE_DATA; + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); + bool eor = !(msg->msg_flags & MSG_MORE); +- size_t try_to_copy, copied = 0; ++ size_t try_to_copy; ++ ssize_t copied = 0; + struct sk_msg *msg_pl, *msg_en; + struct tls_rec *rec; + int required_size; +@@ -923,6 +930,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + int num_zc = 0; + int orig_size; + int ret = 0; ++ 
int pending; + + if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) + return -EOPNOTSUPP; +@@ -1089,13 +1097,19 @@ trim_sgl: + goto send_end; + } else if (num_zc) { + /* Wait for pending encryptions to get completed */ +- smp_store_mb(ctx->async_notify, true); ++ spin_lock_bh(&ctx->encrypt_compl_lock); ++ ctx->async_notify = true; + +- if (atomic_read(&ctx->encrypt_pending)) ++ pending = atomic_read(&ctx->encrypt_pending); ++ spin_unlock_bh(&ctx->encrypt_compl_lock); ++ if (pending) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + else + reinit_completion(&ctx->async_wait.completion); + ++ /* There can be no concurrent accesses, since we have no ++ * pending encrypt operations ++ */ + WRITE_ONCE(ctx->async_notify, false); + + if (ctx->async_wait.err) { +@@ -1115,7 +1129,7 @@ send_end: + + release_sock(sk); + mutex_unlock(&tls_ctx->tx_lock); +- return copied ? copied : ret; ++ return copied > 0 ? copied : ret; + } + + static int tls_sw_do_sendpage(struct sock *sk, struct page *page, +@@ -1129,7 +1143,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page, + struct sk_msg *msg_pl; + struct tls_rec *rec; + int num_async = 0; +- size_t copied = 0; ++ ssize_t copied = 0; + bool full_record; + int record_room; + int ret = 0; +@@ -1231,7 +1245,7 @@ wait_for_memory: + } + sendpage_end: + ret = sk_stream_error(sk, flags, ret); +- return copied ? copied : ret; ++ return copied > 0 ? copied : ret; + } + + int tls_sw_sendpage_locked(struct sock *sk, struct page *page, +@@ -1724,6 +1738,7 @@ int tls_sw_recvmsg(struct sock *sk, + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); + bool is_peek = flags & MSG_PEEK; + int num_async = 0; ++ int pending; + + flags |= nonblock; + +@@ -1886,8 +1901,11 @@ pick_next_record: + recv_end: + if (num_async) { + /* Wait for all previously submitted records to be decrypted */ +- smp_store_mb(ctx->async_notify, true); +- if (atomic_read(&ctx->decrypt_pending)) { ++ spin_lock_bh(&ctx->decrypt_compl_lock); ++ ctx->async_notify = true; ++ pending = atomic_read(&ctx->decrypt_pending); ++ spin_unlock_bh(&ctx->decrypt_compl_lock); ++ if (pending) { + err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + if (err) { + /* one of async decrypt failed */ +@@ -1899,6 +1917,10 @@ recv_end: + } else { + reinit_completion(&ctx->async_wait.completion); + } ++ ++ /* There can be no concurrent accesses, since we have no ++ * pending decrypt operations ++ */ + WRITE_ONCE(ctx->async_notify, false); + + /* Drain records from the rx_list & copy if required */ +@@ -2285,6 +2307,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) + + if (tx) { + crypto_init_wait(&sw_ctx_tx->async_wait); ++ spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); + crypto_info = &ctx->crypto_send.info; + cctx = &ctx->tx; + aead = &sw_ctx_tx->aead_send; +@@ -2293,6 +2316,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) + sw_ctx_tx->tx_work.sk = sk; + } else { + crypto_init_wait(&sw_ctx_rx->async_wait); ++ spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); + crypto_info = &ctx->crypto_recv.info; + cctx = &ctx->rx; + skb_queue_head_init(&sw_ctx_rx->rx_list); +diff --git a/net/wireless/core.c b/net/wireless/core.c +index 3e25229a059d..ee5bb8d8af04 100644 +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -142,7 +142,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, + if (result) + return result; + +- if (rdev->wiphy.debugfsdir) ++ if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) + 
debugfs_rename(rdev->wiphy.debugfsdir->d_parent, + rdev->wiphy.debugfsdir, + rdev->wiphy.debugfsdir->d_parent, newname); +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c +index c5dba371a765..993f14acbb9f 100644 +--- a/net/xdp/xdp_umem.c ++++ b/net/xdp/xdp_umem.c +@@ -341,8 +341,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + { + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; + u32 chunk_size = mr->chunk_size, headroom = mr->headroom; ++ u64 npgs, addr = mr->addr, size = mr->len; + unsigned int chunks, chunks_per_page; +- u64 addr = mr->addr, size = mr->len; + int err; + + if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { +@@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + if ((addr + size) < addr) + return -EINVAL; + ++ npgs = div_u64(size, PAGE_SIZE); ++ if (npgs > U32_MAX) ++ return -EINVAL; ++ + chunks = (unsigned int)div_u64(size, chunk_size); + if (chunks == 0) + return -EINVAL; +@@ -391,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + umem->size = size; + umem->headroom = headroom; + umem->chunk_size_nohr = chunk_size - headroom; +- umem->npgs = size / PAGE_SIZE; ++ umem->npgs = (u32)npgs; + umem->pgs = NULL; + umem->user = NULL; + umem->flags = mr->flags; +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c +index 64486ad81341..c365b918be35 100644 +--- a/net/xfrm/xfrm_device.c ++++ b/net/xfrm/xfrm_device.c +@@ -25,12 +25,10 @@ static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb, + struct xfrm_offload *xo = xfrm_offload(skb); + + skb_reset_mac_len(skb); +- pskb_pull(skb, skb->mac_len + hsize + x->props.header_len); +- +- if (xo->flags & XFRM_GSO_SEGMENT) { +- skb_reset_transport_header(skb); ++ if (xo->flags & XFRM_GSO_SEGMENT) + skb->transport_header -= x->props.header_len; +- } ++ ++ pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); + } + + static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb, +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index 2c86a2fc3915..7a8474547791 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -643,7 +643,7 @@ resume: + dev_put(skb->dev); + + spin_lock(&x->lock); +- if (nexthdr <= 0) { ++ if (nexthdr < 0) { + if (nexthdr == -EBADMSG) { + xfrm_audit_state_icvfail(x, skb, + x->type->proto); +diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c +index 4d5627e274fe..0ab2b35c95de 100644 +--- a/net/xfrm/xfrm_interface.c ++++ b/net/xfrm/xfrm_interface.c +@@ -772,7 +772,28 @@ static void __net_exit xfrmi_exit_net(struct net *net) + rtnl_unlock(); + } + ++static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) ++{ ++ struct net *net; ++ LIST_HEAD(list); ++ ++ rtnl_lock(); ++ list_for_each_entry(net, net_exit_list, exit_list) { ++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); ++ struct xfrm_if __rcu **xip; ++ struct xfrm_if *xi; ++ ++ for (xip = &xfrmn->xfrmi[0]; ++ (xi = rtnl_dereference(*xip)) != NULL; ++ xip = &xi->next) ++ unregister_netdevice_queue(xi->dev, &list); ++ } ++ unregister_netdevice_many(&list); ++ rtnl_unlock(); ++} ++ + static struct pernet_operations xfrmi_net_ops = { ++ .exit_batch = xfrmi_exit_batch_net, + .exit = xfrmi_exit_net, + .id = &xfrmi_net_id, + .size = sizeof(struct xfrmi_net), +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c +index b1db55b50ba1..9a6a8c4008ab 100644 +--- a/net/xfrm/xfrm_output.c ++++ 
b/net/xfrm/xfrm_output.c +@@ -586,18 +586,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) + xfrm_state_hold(x); + + if (skb_is_gso(skb)) { +- skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; ++ if (skb->inner_protocol) ++ return xfrm_output_gso(net, sk, skb); + +- return xfrm_output2(net, sk, skb); ++ skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; ++ goto out; + } + + if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) + goto out; ++ } else { ++ if (skb_is_gso(skb)) ++ return xfrm_output_gso(net, sk, skb); + } + +- if (skb_is_gso(skb)) +- return xfrm_output_gso(net, sk, skb); +- + if (skb->ip_summed == CHECKSUM_PARTIAL) { + err = skb_checksum_help(skb); + if (err) { +@@ -643,7 +645,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) + + if (skb->protocol == htons(ETH_P_IP)) + proto = AF_INET; +- else if (skb->protocol == htons(ETH_P_IPV6)) ++ else if (skb->protocol == htons(ETH_P_IPV6) && ++ skb->sk->sk_family == AF_INET6) + proto = AF_INET6; + else + return; +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 264cf05a4eaa..6a1a21ae47bb 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1433,12 +1433,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, + static bool xfrm_policy_mark_match(struct xfrm_policy *policy, + struct xfrm_policy *pol) + { +- u32 mark = policy->mark.v & policy->mark.m; +- +- if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) +- return true; +- +- if ((mark & pol->mark.m) == pol->mark.v && ++ if (policy->mark.v == pol->mark.v && + policy->priority == pol->priority) + return true; + +diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c +index 587b68b1f8dd..430a4b7e353e 100644 +--- a/samples/bpf/lwt_len_hist_user.c ++++ b/samples/bpf/lwt_len_hist_user.c +@@ -15,8 +15,6 @@ + #define MAX_INDEX 64 + #define MAX_STARS 38 + +-char bpf_log_buf[BPF_LOG_BUF_SIZE]; +- + static void stars(char *str, long val, long max, int width) + { + int i; +diff --git a/security/commoncap.c b/security/commoncap.c +index f4ee0ae106b2..0ca31c8bc0b1 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -812,6 +812,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) + int ret; + kuid_t root_uid; + ++ new->cap_ambient = old->cap_ambient; + if (WARN_ON(!cap_ambient_invariant_ok(old))) + return -EPERM; + +diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c +index 00cb5aed10a9..28bec15b0959 100644 +--- a/sound/core/hwdep.c ++++ b/sound/core/hwdep.c +@@ -216,12 +216,12 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw, + if (info.index >= 32) + return -EINVAL; + /* check whether the dsp was already loaded */ +- if (hw->dsp_loaded & (1 << info.index)) ++ if (hw->dsp_loaded & (1u << info.index)) + return -EBUSY; + err = hw->ops.dsp_load(hw, &info); + if (err < 0) + return err; +- hw->dsp_loaded |= (1 << info.index); ++ hw->dsp_loaded |= (1u << info.index); + return 0; + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index c5bec191e003..da4d21445e80 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -384,6 +384,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0282: + case 0x10ec0283: + case 0x10ec0286: ++ case 0x10ec0287: + case 0x10ec0288: + case 0x10ec0285: + case 0x10ec0298: +@@ -5484,18 +5485,9 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, + { 0x19, 0x21a11010 }, /* dock mic */ + { } + }; +- /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise +- * 
the speaker output becomes too low by some reason on Thinkpads with +- * ALC298 codec +- */ +- static const hda_nid_t preferred_pairs[] = { +- 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, +- 0 +- }; + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { +- spec->gen.preferred_dacs = preferred_pairs; + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; + snd_hda_apply_pincfgs(codec, pincfgs); + } else if (action == HDA_FIXUP_ACT_INIT) { +@@ -5508,6 +5500,23 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, + } + } + ++static void alc_fixup_tpt470_dacs(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise ++ * the speaker output becomes too low by some reason on Thinkpads with ++ * ALC298 codec ++ */ ++ static const hda_nid_t preferred_pairs[] = { ++ 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, ++ 0 ++ }; ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) ++ spec->gen.preferred_dacs = preferred_pairs; ++} ++ + static void alc_shutup_dell_xps13(struct hda_codec *codec) + { + struct alc_spec *spec = codec->spec; +@@ -6063,6 +6072,7 @@ enum { + ALC700_FIXUP_INTEL_REFERENCE, + ALC274_FIXUP_DELL_BIND_DACS, + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, ++ ALC298_FIXUP_TPT470_DOCK_FIX, + ALC298_FIXUP_TPT470_DOCK, + ALC255_FIXUP_DUMMY_LINEOUT_VERB, + ALC255_FIXUP_DELL_HEADSET_MIC, +@@ -6994,12 +7004,18 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC274_FIXUP_DELL_BIND_DACS + }, +- [ALC298_FIXUP_TPT470_DOCK] = { ++ [ALC298_FIXUP_TPT470_DOCK_FIX] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_tpt470_dock, + .chained = true, + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE + }, ++ [ALC298_FIXUP_TPT470_DOCK] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_tpt470_dacs, ++ .chained = true, ++ .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX ++ }, + [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +@@ -7638,6 +7654,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, + {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, ++ {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"}, + {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"}, + {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, + {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"}, +@@ -8305,6 +8322,7 @@ static int patch_alc269(struct hda_codec *codec) + case 0x10ec0215: + case 0x10ec0245: + case 0x10ec0285: ++ case 0x10ec0287: + case 0x10ec0289: + spec->codec_variant = ALC269_TYPE_ALC215; + spec->shutup = alc225_shutup; +@@ -9583,6 +9601,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { + HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269), ++ HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269), +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 583edacc9fe8..f55afe3a98e3 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -1171,6 +1171,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + cval->res = 384; + } + break; ++ case USB_ID(0x0495, 0x3042): /* 
ESS Technology Asus USB DAC */ ++ if ((strstr(kctl->id.name, "Playback Volume") != NULL) || ++ strstr(kctl->id.name, "Capture Volume") != NULL) { ++ cval->min >>= 8; ++ cval->max = 0; ++ cval->res = 1; ++ } ++ break; + } + } + +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index 39d6c6fa5e33..ac84f0b2b0bc 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -387,6 +387,21 @@ static const struct usbmix_connector_map trx40_mobo_connector_map[] = { + {} + }; + ++/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */ ++static const struct usbmix_name_map aorus_master_alc1220vb_map[] = { ++ { 17, NULL }, /* OT, IEC958?, disabled */ ++ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ ++ { 16, "Line Out" }, /* OT */ ++ { 22, "Line Out Playback" }, /* FU */ ++ { 7, "Line" }, /* IT */ ++ { 19, "Line Capture" }, /* FU */ ++ { 8, "Mic" }, /* IT */ ++ { 20, "Mic Capture" }, /* FU */ ++ { 9, "Front Mic" }, /* IT */ ++ { 21, "Front Mic Capture" }, /* FU */ ++ {} ++}; ++ + /* + * Control map entries + */ +@@ -506,6 +521,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .id = USB_ID(0x05a7, 0x1020), + .map = bose_companion5_map, + }, ++ { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */ ++ .id = USB_ID(0x0414, 0xa001), ++ .map = aorus_master_alc1220vb_map, ++ }, + { /* Gigabyte TRX40 Aorus Pro WiFi */ + .id = USB_ID(0x0414, 0xa002), + .map = trx40_mobo_map, +@@ -529,6 +548,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, ++ { /* Asrock TRX40 Creator */ ++ .id = USB_ID(0x26ce, 0x0a01), ++ .map = trx40_mobo_map, ++ .connector_map = trx40_mobo_connector_map, ++ }, + { 0 } /* terminator */ + }; + +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index 8c2f5c23e1b4..bbae11605a4c 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -3647,6 +3647,32 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), + ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */ + ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */ + ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */ ++ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ + #undef ALC1220_VB_DESKTOP + ++/* Two entries for Gigabyte TRX40 Aorus Master: ++ * TRX40 Aorus Master has two USB-audio devices, one for the front headphone ++ * with ESS SABRE9218 DAC chip, while another for the rest I/O (the rear ++ * panel and the front mic) with Realtek ALC1220-VB. ++ * Here we provide two distinct names for making UCM profiles easier. 
++ */ ++{ ++ USB_DEVICE(0x0414, 0xa000), ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { ++ .vendor_name = "Gigabyte", ++ .product_name = "Aorus Master Front Headphone", ++ .profile_name = "Gigabyte-Aorus-Master-Front-Headphone", ++ .ifnum = QUIRK_NO_INTERFACE ++ } ++}, ++{ ++ USB_DEVICE(0x0414, 0xa001), ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { ++ .vendor_name = "Gigabyte", ++ .product_name = "Aorus Master Main Audio", ++ .profile_name = "Gigabyte-Aorus-Master-Main-Audio", ++ .ifnum = QUIRK_NO_INTERFACE ++ } ++}, ++ + #undef USB_DEVICE_VENDOR_SPEC +diff --git a/tools/arch/x86/include/uapi/asm/unistd.h b/tools/arch/x86/include/uapi/asm/unistd.h +index 196fdd02b8b1..30d7d04d72d6 100644 +--- a/tools/arch/x86/include/uapi/asm/unistd.h ++++ b/tools/arch/x86/include/uapi/asm/unistd.h +@@ -3,7 +3,7 @@ + #define _UAPI_ASM_X86_UNISTD_H + + /* x32 syscall flag bit */ +-#define __X32_SYSCALL_BIT 0x40000000UL ++#define __X32_SYSCALL_BIT 0x40000000 + + #ifndef __KERNEL__ + # ifdef __i386__ +diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c +index 6ccf6f6d09df..5b7d6c16d33f 100644 +--- a/tools/perf/util/srcline.c ++++ b/tools/perf/util/srcline.c +@@ -193,16 +193,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data) + bfd_vma pc, vma; + bfd_size_type size; + struct a2l_data *a2l = data; ++ flagword flags; + + if (a2l->found) + return; + +- if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0) ++#ifdef bfd_get_section_flags ++ flags = bfd_get_section_flags(abfd, section); ++#else ++ flags = bfd_section_flags(section); ++#endif ++ if ((flags & SEC_ALLOC) == 0) + return; + + pc = a2l->addr; ++#ifdef bfd_get_section_vma + vma = bfd_get_section_vma(abfd, section); ++#else ++ vma = bfd_section_vma(section); ++#endif ++#ifdef bfd_get_section_size + size = bfd_get_section_size(section); ++#else ++ size = bfd_section_size(section); ++#endif + + if (pc < vma || pc >= vma + size) + return; diff --git a/patch/kernel/odroidxu4-current/patch-5.4.44-45.patch b/patch/kernel/odroidxu4-current/patch-5.4.44-45.patch new file mode 100644 index 000000000..235a18c8f --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.44-45.patch @@ -0,0 +1,1836 @@ +diff --git a/Makefile b/Makefile +index ef4697fcb8ea..d57c443d9073 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 44 ++SUBLEVEL = 45 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c +index 7ee89dc61f6e..23dc002aa574 100644 +--- a/arch/arc/kernel/setup.c ++++ b/arch/arc/kernel/setup.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -409,12 +410,12 @@ static void arc_chk_core_config(void) + if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) + panic("Linux built with incorrect DCCM Base address\n"); + +- if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) ++ if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz) + panic("Linux built with incorrect DCCM Size\n"); + #endif + + #ifdef CONFIG_ARC_HAS_ICCM +- if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) ++ if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz) + panic("Linux built with incorrect ICCM Size\n"); + #endif + +diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig +index a931d0a256d0..a645bca5899a 100644 +--- a/arch/arc/plat-eznps/Kconfig ++++ b/arch/arc/plat-eznps/Kconfig +@@ -6,6 +6,7 @@ + + menuconfig 
ARC_PLAT_EZNPS + bool "\"EZchip\" ARC dev platform" ++ depends on ISA_ARCOMPACT + select CPU_BIG_ENDIAN + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT + select EZNPS_GIC +diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c +index 7ccc5c85c74e..000b350d4060 100644 +--- a/arch/powerpc/platforms/powernv/opal-imc.c ++++ b/arch/powerpc/platforms/powernv/opal-imc.c +@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node, + + imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root); + +- /* +- * Return here, either because 'imc' directory already exists, +- * Or failed to create a new one. +- */ + if (!imc_debugfs_parent) + return; + +@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node, + } + + pmu_ptr->imc_counter_mmaped = true; +- export_imc_mode_and_cmd(node, pmu_ptr); + kfree(base_addr_arr); + kfree(chipid_arr); + return 0; +@@ -151,7 +146,7 @@ error: + * and domain as the inputs. + * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets + */ +-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) ++static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain) + { + int ret = 0; + struct imc_pmu *pmu_ptr; +@@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) + + /* Return for unknown domain */ + if (domain < 0) +- return -EINVAL; ++ return NULL; + + /* memory for pmu */ + pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); + if (!pmu_ptr) +- return -ENOMEM; ++ return NULL; + + /* Set the domain */ + pmu_ptr->domain = domain; + + ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size); +- if (ret) { +- ret = -EINVAL; ++ if (ret) + goto free_pmu; +- } + + if (!of_property_read_u32(parent, "offset", &offset)) { +- if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) { +- ret = -EINVAL; ++ if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) + goto free_pmu; +- } + } + + /* Function to register IMC pmu */ +@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) + if (pmu_ptr->domain == IMC_DOMAIN_NEST) + kfree(pmu_ptr->mem_info); + kfree(pmu_ptr); +- return ret; ++ return NULL; + } + +- return 0; ++ return pmu_ptr; + + free_pmu: + kfree(pmu_ptr); +- return ret; ++ return NULL; + } + + static void disable_nest_pmu_counters(void) +@@ -254,6 +245,7 @@ int get_max_nest_dev(void) + static int opal_imc_counters_probe(struct platform_device *pdev) + { + struct device_node *imc_dev = pdev->dev.of_node; ++ struct imc_pmu *pmu; + int pmu_count = 0, domain; + bool core_imc_reg = false, thread_imc_reg = false; + u32 type; +@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev) + } + + for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) { ++ pmu = NULL; + if (of_property_read_u32(imc_dev, "type", &type)) { + pr_warn("IMC Device without type property\n"); + continue; +@@ -300,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev) + break; + } + +- if (!imc_pmu_create(imc_dev, pmu_count, domain)) { +- if (domain == IMC_DOMAIN_NEST) ++ pmu = imc_pmu_create(imc_dev, pmu_count, domain); ++ if (pmu != NULL) { ++ if (domain == IMC_DOMAIN_NEST) { ++ if (!imc_debugfs_parent) ++ export_imc_mode_and_cmd(imc_dev, pmu); + pmu_count++; ++ } + if (domain == IMC_DOMAIN_CORE) + core_imc_reg = true; + if (domain == IMC_DOMAIN_THREAD) +@@ -310,10 +307,6 @@ static int 
opal_imc_counters_probe(struct platform_device *pdev) + } + } + +- /* If none of the nest units are registered, remove debugfs interface */ +- if (pmu_count == 0) +- debugfs_remove_recursive(imc_debugfs_parent); +- + /* If core imc is not registered, unregister thread-imc */ + if (!core_imc_reg && thread_imc_reg) + unregister_thread_imc(); +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index 8057aafd5f5e..6d130c89fbd8 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -187,6 +188,8 @@ static void dump_tlb_44x(void); + static void dump_tlb_book3e(void); + #endif + ++static void clear_all_bpt(void); ++ + #ifdef CONFIG_PPC64 + #define REG "%.16lx" + #else +@@ -283,10 +286,38 @@ Commands:\n\ + " U show uptime information\n" + " ? help\n" + " # n limit output to n lines per page (for dp, dpa, dl)\n" +-" zr reboot\n\ +- zh halt\n" ++" zr reboot\n" ++" zh halt\n" + ; + ++#ifdef CONFIG_SECURITY ++static bool xmon_is_locked_down(void) ++{ ++ static bool lockdown; ++ ++ if (!lockdown) { ++ lockdown = !!security_locked_down(LOCKDOWN_XMON_RW); ++ if (lockdown) { ++ printf("xmon: Disabled due to kernel lockdown\n"); ++ xmon_is_ro = true; ++ } ++ } ++ ++ if (!xmon_is_ro) { ++ xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR); ++ if (xmon_is_ro) ++ printf("xmon: Read-only due to kernel lockdown\n"); ++ } ++ ++ return lockdown; ++} ++#else /* CONFIG_SECURITY */ ++static inline bool xmon_is_locked_down(void) ++{ ++ return false; ++} ++#endif ++ + static struct pt_regs *xmon_regs; + + static inline void sync(void) +@@ -438,7 +469,10 @@ static bool wait_for_other_cpus(int ncpus) + + return false; + } +-#endif /* CONFIG_SMP */ ++#else /* CONFIG_SMP */ ++static inline void get_output_lock(void) {} ++static inline void release_output_lock(void) {} ++#endif + + static inline int unrecoverable_excp(struct pt_regs *regs) + { +@@ -455,6 +489,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + int cmd = 0; + struct bpt *bp; + long recurse_jmp[JMP_BUF_LEN]; ++ bool locked_down; + unsigned long offset; + unsigned long flags; + #ifdef CONFIG_SMP +@@ -465,6 +500,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + local_irq_save(flags); + hard_irq_disable(); + ++ locked_down = xmon_is_locked_down(); ++ + if (!fromipi) { + tracing_enabled = tracing_is_on(); + tracing_off(); +@@ -518,7 +555,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + + if (!fromipi) { + get_output_lock(); +- excprint(regs); ++ if (!locked_down) ++ excprint(regs); + if (bp) { + printf("cpu 0x%x stopped at breakpoint 0x%tx (", + cpu, BP_NUM(bp)); +@@ -570,10 +608,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + } + remove_bpts(); + disable_surveillance(); +- /* for breakpoint or single step, print the current instr. */ +- if (bp || TRAP(regs) == 0xd00) +- ppc_inst_dump(regs->nip, 1, 0); +- printf("enter ? for help\n"); ++ ++ if (!locked_down) { ++ /* for breakpoint or single step, print curr insn */ ++ if (bp || TRAP(regs) == 0xd00) ++ ppc_inst_dump(regs->nip, 1, 0); ++ printf("enter ? 
for help\n"); ++ } ++ + mb(); + xmon_gate = 1; + barrier(); +@@ -597,8 +639,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + spin_cpu_relax(); + touch_nmi_watchdog(); + } else { +- cmd = cmds(regs); +- if (cmd != 0) { ++ if (!locked_down) ++ cmd = cmds(regs); ++ if (locked_down || cmd != 0) { + /* exiting xmon */ + insert_bpts(); + xmon_gate = 0; +@@ -635,13 +678,16 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + "can't continue\n"); + remove_bpts(); + disable_surveillance(); +- /* for breakpoint or single step, print the current instr. */ +- if (bp || TRAP(regs) == 0xd00) +- ppc_inst_dump(regs->nip, 1, 0); +- printf("enter ? for help\n"); ++ if (!locked_down) { ++ /* for breakpoint or single step, print current insn */ ++ if (bp || TRAP(regs) == 0xd00) ++ ppc_inst_dump(regs->nip, 1, 0); ++ printf("enter ? for help\n"); ++ } + } + +- cmd = cmds(regs); ++ if (!locked_down) ++ cmd = cmds(regs); + + insert_bpts(); + in_xmon = 0; +@@ -670,7 +716,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) + } + } + #endif +- insert_cpu_bpts(); ++ if (locked_down) ++ clear_all_bpt(); ++ else ++ insert_cpu_bpts(); + + touch_nmi_watchdog(); + local_irq_restore(flags); +@@ -3761,6 +3810,11 @@ static void xmon_init(int enable) + #ifdef CONFIG_MAGIC_SYSRQ + static void sysrq_handle_xmon(int key) + { ++ if (xmon_is_locked_down()) { ++ clear_all_bpt(); ++ xmon_init(0); ++ return; ++ } + /* ensure xmon is enabled */ + xmon_init(1); + debugger(get_irq_regs()); +@@ -3782,7 +3836,6 @@ static int __init setup_xmon_sysrq(void) + device_initcall(setup_xmon_sysrq); + #endif /* CONFIG_MAGIC_SYSRQ */ + +-#ifdef CONFIG_DEBUG_FS + static void clear_all_bpt(void) + { + int i; +@@ -3800,18 +3853,22 @@ static void clear_all_bpt(void) + iabr = NULL; + dabr.enabled = 0; + } +- +- printf("xmon: All breakpoints cleared\n"); + } + ++#ifdef CONFIG_DEBUG_FS + static int xmon_dbgfs_set(void *data, u64 val) + { + xmon_on = !!val; + xmon_init(xmon_on); + + /* make sure all breakpoints removed when disabling */ +- if (!xmon_on) ++ if (!xmon_on) { + clear_all_bpt(); ++ get_output_lock(); ++ printf("xmon: All breakpoints cleared\n"); ++ release_output_lock(); ++ } ++ + return 0; + } + +@@ -3837,7 +3894,11 @@ static int xmon_early __initdata; + + static int __init early_parse_xmon(char *p) + { +- if (!p || strncmp(p, "early", 5) == 0) { ++ if (xmon_is_locked_down()) { ++ xmon_init(0); ++ xmon_early = 0; ++ xmon_on = 0; ++ } else if (!p || strncmp(p, "early", 5) == 0) { + /* just "xmon" is equivalent to "xmon=early" */ + xmon_init(1); + xmon_early = 1; +diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S +index 3431b2d5e334..f942341429b1 100644 +--- a/arch/s390/kernel/mcount.S ++++ b/arch/s390/kernel/mcount.S +@@ -41,6 +41,7 @@ EXPORT_SYMBOL(_mcount) + ENTRY(ftrace_caller) + .globl ftrace_regs_caller + .set ftrace_regs_caller,ftrace_caller ++ stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller + lgr %r1,%r15 + #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) + aghi %r0,MCOUNT_RETURN_FIXUP +diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c +index 5674710a4841..7dfae86afa47 100644 +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + rste &= ~_SEGMENT_ENTRY_NOEXEC; + + /* Set correct table type for 2G hugepages */ +- if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) +- rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE; 
+- else ++ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) { ++ if (likely(pte_present(pte))) ++ rste |= _REGION3_ENTRY_LARGE; ++ rste |= _REGION_ENTRY_TYPE_R3; ++ } else if (likely(pte_present(pte))) + rste |= _SEGMENT_ENTRY_LARGE; ++ + clear_huge_pte_skeys(mm, rste); + pte_val(*ptep) = rste; + } +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 1e6bb4c25334..ea85f23d9e22 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -253,6 +253,7 @@ static inline int pmd_large(pmd_t pte) + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE ++/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */ + static inline int pmd_trans_huge(pmd_t pmd) + { + return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; +diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c +index b8ef8557d4b3..2a36902d418c 100644 +--- a/arch/x86/mm/mmio-mod.c ++++ b/arch/x86/mm/mmio-mod.c +@@ -372,7 +372,7 @@ static void enter_uniprocessor(void) + int cpu; + int err; + +- if (downed_cpus == NULL && ++ if (!cpumask_available(downed_cpus) && + !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { + pr_notice("Failed to allocate mask\n"); + goto out; +@@ -402,7 +402,7 @@ static void leave_uniprocessor(void) + int cpu; + int err; + +- if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) ++ if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0) + return; + pr_notice("Re-enabling CPUs...\n"); + for_each_cpu(cpu, downed_cpus) { +diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c +index 3d7fdea872f8..2553e05e0725 100644 +--- a/drivers/block/null_blk_zoned.c ++++ b/drivers/block/null_blk_zoned.c +@@ -20,6 +20,10 @@ int null_zone_init(struct nullb_device *dev) + pr_err("zone_size must be power-of-two\n"); + return -EINVAL; + } ++ if (dev->zone_size > dev->size) { ++ pr_err("Zone size larger than device capacity\n"); ++ return -EINVAL; ++ } + + dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; + dev->nr_zones = dev_size >> +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index ea2849338d6c..9b69e55ad701 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -191,10 +191,11 @@ static const struct edid_quirk { + { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, + { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, + +- /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ ++ /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */ + { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, ++ { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP }, + + /* Windows Mixed Reality Headsets */ + { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 9b15ac4f2fb6..4ab6531a4a74 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -7218,11 +7218,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, + intel_connector->get_hw_state = intel_connector_get_hw_state; + + /* init MST on ports that can support it */ +- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && +- (port == PORT_B || port == PORT_C || +- port == PORT_D || port == PORT_F)) +- intel_dp_mst_encoder_init(intel_dig_port, +- intel_connector->base.base.id); ++ intel_dp_mst_encoder_init(intel_dig_port, ++ intel_connector->base.base.id); + + if 
(!intel_edp_init_connector(intel_dp, intel_connector)) { + intel_dp_aux_fini(intel_dp); +diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c +index 600873c796d0..74d45a0eecb8 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c +@@ -653,21 +653,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) + int + intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) + { ++ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct intel_dp *intel_dp = &intel_dig_port->dp; +- struct drm_device *dev = intel_dig_port->base.base.dev; ++ enum port port = intel_dig_port->base.port; + int ret; + +- intel_dp->can_mst = true; ++ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) ++ return 0; ++ ++ if (INTEL_GEN(i915) < 12 && port == PORT_A) ++ return 0; ++ ++ if (INTEL_GEN(i915) < 11 && port == PORT_E) ++ return 0; ++ + intel_dp->mst_mgr.cbs = &mst_cbs; + + /* create encoders */ + intel_dp_create_fake_mst_encoders(intel_dig_port); +- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev, ++ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, + &intel_dp->aux, 16, 3, conn_base_id); +- if (ret) { +- intel_dp->can_mst = false; ++ if (ret) + return ret; +- } ++ ++ intel_dp->can_mst = true; ++ + return 0; + } + +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 03c720b47306..39e4da7468e1 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -69,6 +69,7 @@ MODULE_LICENSE("GPL"); + #define MT_QUIRK_ASUS_CUSTOM_UP BIT(17) + #define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18) + #define MT_QUIRK_SEPARATE_APP_REPORT BIT(19) ++#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) + + #define MT_INPUTMODE_TOUCHSCREEN 0x02 + #define MT_INPUTMODE_TOUCHPAD 0x03 +@@ -189,6 +190,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); + #define MT_CLS_WIN_8 0x0012 + #define MT_CLS_EXPORT_ALL_INPUTS 0x0013 + #define MT_CLS_WIN_8_DUAL 0x0014 ++#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015 + + /* vendor specific classes */ + #define MT_CLS_3M 0x0101 +@@ -279,6 +281,15 @@ static const struct mt_class mt_classes[] = { + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_WIN8_PTP_BUTTONS, + .export_all_inputs = true }, ++ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ .quirks = MT_QUIRK_ALWAYS_VALID | ++ MT_QUIRK_IGNORE_DUPLICATES | ++ MT_QUIRK_HOVERING | ++ MT_QUIRK_CONTACT_CNT_ACCURATE | ++ MT_QUIRK_STICKY_FINGERS | ++ MT_QUIRK_WIN8_PTP_BUTTONS | ++ MT_QUIRK_FORCE_MULTI_INPUT, ++ .export_all_inputs = true }, + + /* + * vendor specific classes +@@ -1714,6 +1725,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + if (id->group != HID_GROUP_MULTITOUCH_WIN_8) + hdev->quirks |= HID_QUIRK_MULTI_INPUT; + ++ if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) { ++ hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP; ++ hdev->quirks |= HID_QUIRK_MULTI_INPUT; ++ } ++ + timer_setup(&td->release_timer, mt_expired_timeout, 0); + + ret = hid_parse(hdev); +@@ -1926,6 +1942,11 @@ static const struct hid_device_id mt_devices[] = { + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, + ++ /* Elan devices */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, ++ USB_VENDOR_ID_ELAN, 0x313a) }, ++ + /* Elitegroup panel */ + { .driver_data = MT_CLS_SERIAL, + 
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, +@@ -2056,6 +2077,11 @@ static const struct hid_device_id mt_devices[] = { + MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, + USB_DEVICE_ID_MTP_STM)}, + ++ /* Synaptics devices */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, ++ USB_VENDOR_ID_SYNAPTICS, 0xce08) }, ++ + /* TopSeed panels */ + { .driver_data = MT_CLS_TOPSEED, + MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index 4c6ed6ef31f1..2f073f536070 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, + if (sc->quirks & PS3REMOTE) + return ps3remote_fixup(hdev, rdesc, rsize); + ++ /* ++ * Some knock-off USB dongles incorrectly report their button count ++ * as 13 instead of 16 causing three non-functional buttons. ++ */ ++ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 && ++ /* Report Count (13) */ ++ rdesc[23] == 0x95 && rdesc[24] == 0x0D && ++ /* Usage Maximum (13) */ ++ rdesc[37] == 0x29 && rdesc[38] == 0x0D && ++ /* Report Count (3) */ ++ rdesc[43] == 0x95 && rdesc[44] == 0x03) { ++ hid_info(hdev, "Fixing up USB dongle report descriptor\n"); ++ rdesc[24] = 0x10; ++ rdesc[38] = 0x10; ++ rdesc[44] = 0x00; ++ } ++ + return rdesc; + } + +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +index a66f08041a1a..ec142bc8c1da 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +@@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ .ident = "Schneider SCL142ALM", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { } /* Terminate list */ + }; + +diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c +index 92d2c706c2a7..a60042431370 100644 +--- a/drivers/i2c/busses/i2c-altera.c ++++ b/drivers/i2c/busses/i2c-altera.c +@@ -70,6 +70,7 @@ + * @isr_mask: cached copy of local ISR enables. + * @isr_status: cached copy of local ISR status. + * @lock: spinlock for IRQ synchronization. ++ * @isr_mutex: mutex for IRQ thread. 
+ */ + struct altr_i2c_dev { + void __iomem *base; +@@ -86,6 +87,7 @@ struct altr_i2c_dev { + u32 isr_mask; + u32 isr_status; + spinlock_t lock; /* IRQ synchronization */ ++ struct mutex isr_mutex; + }; + + static void +@@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) + struct altr_i2c_dev *idev = _dev; + u32 status = idev->isr_status; + ++ mutex_lock(&idev->isr_mutex); + if (!idev->msg) { + dev_warn(idev->dev, "unexpected interrupt\n"); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); +- return IRQ_HANDLED; ++ goto out; + } + read = (idev->msg->flags & I2C_M_RD) != 0; + +@@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) + complete(&idev->msg_complete); + dev_dbg(idev->dev, "Message Complete\n"); + } ++out: ++ mutex_unlock(&idev->isr_mutex); + + return IRQ_HANDLED; + } +@@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) + u32 value; + u8 addr = i2c_8bit_addr_from_msg(msg); + ++ mutex_lock(&idev->isr_mutex); + idev->msg = msg; + idev->msg_len = msg->len; + idev->buf = msg->buf; +@@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) + altr_i2c_int_enable(idev, imask, true); + altr_i2c_fill_tx_fifo(idev); + } ++ mutex_unlock(&idev->isr_mutex); + + time_left = wait_for_completion_timeout(&idev->msg_complete, + ALTR_I2C_XFER_TIMEOUT); +@@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev) + idev->dev = &pdev->dev; + init_completion(&idev->msg_complete); + spin_lock_init(&idev->lock); ++ mutex_init(&idev->isr_mutex); + + ret = device_property_read_u32(idev->dev, "fifo-size", + &idev->fifo_size); +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c +index b462eaca1ee3..4494dab8c3d8 100644 +--- a/drivers/infiniband/hw/qedr/main.c ++++ b/drivers/infiniband/hw/qedr/main.c +@@ -360,7 +360,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) + xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ); + + if (IS_IWARP(dev)) { +- xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ); ++ xa_init(&dev->qps); + dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); + } + +diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h +index 0cfd849b13d6..8e927f6c1520 100644 +--- a/drivers/infiniband/hw/qedr/qedr.h ++++ b/drivers/infiniband/hw/qedr/qedr.h +@@ -40,6 +40,7 @@ + #include + #include + #include ++#include + #include "qedr_hsi_rdma.h" + + #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" +@@ -377,10 +378,20 @@ enum qedr_qp_err_bitmap { + QEDR_QP_ERR_RQ_PBL_FULL = 32, + }; + ++enum qedr_qp_create_type { ++ QEDR_QP_CREATE_NONE, ++ QEDR_QP_CREATE_USER, ++ QEDR_QP_CREATE_KERNEL, ++}; ++ ++enum qedr_iwarp_cm_flags { ++ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0), ++ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1), ++}; ++ + struct qedr_qp { + struct ib_qp ibqp; /* must be first */ + struct qedr_dev *dev; +- struct qedr_iw_ep *ep; + struct qedr_qp_hwq_info sq; + struct qedr_qp_hwq_info rq; + +@@ -395,6 +406,7 @@ struct qedr_qp { + u32 id; + struct qedr_pd *pd; + enum ib_qp_type qp_type; ++ enum qedr_qp_create_type create_type; + struct qed_rdma_qp *qed_qp; + u32 qp_id; + u16 icid; +@@ -437,8 +449,11 @@ struct qedr_qp { + /* Relevant to qps created from user space only (applications) */ + struct qedr_userq usq; + struct qedr_userq urq; +- atomic_t refcnt; +- bool destroyed; ++ ++ /* synchronization objects used with iwarp ep */ ++ struct kref refcnt; ++ struct completion iwarp_cm_comp; ++ unsigned long iwarp_cm_flags; 
/* enum iwarp_cm_flags */ + }; + + struct qedr_ah { +@@ -531,7 +546,7 @@ struct qedr_iw_ep { + struct iw_cm_id *cm_id; + struct qedr_qp *qp; + void *qed_context; +- u8 during_connect; ++ struct kref refcnt; + }; + + static inline +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c +index 22881d4442b9..5e9732990be5 100644 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c +@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info, + } + } + ++static void qedr_iw_free_qp(struct kref *ref) ++{ ++ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt); ++ ++ kfree(qp); ++} ++ ++static void ++qedr_iw_free_ep(struct kref *ref) ++{ ++ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt); ++ ++ if (ep->qp) ++ kref_put(&ep->qp->refcnt, qedr_iw_free_qp); ++ ++ if (ep->cm_id) ++ ep->cm_id->rem_ref(ep->cm_id); ++ ++ kfree(ep); ++} ++ + static void + qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params) + { +@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params) + + ep->dev = dev; + ep->qed_context = params->ep_context; ++ kref_init(&ep->refcnt); + + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_CONNECT_REQUEST; +@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params) + { + struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; + +- if (ep->cm_id) { ++ if (ep->cm_id) + qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE); + +- ep->cm_id->rem_ref(ep->cm_id); +- ep->cm_id = NULL; +- } ++ kref_put(&ep->refcnt, qedr_iw_free_ep); + } + + static void +@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) + struct qedr_qp *qp = ep->qp; + struct iw_cm_event event; + +- if (qp->destroyed) { +- kfree(dwork); +- qedr_iw_qp_rem_ref(&qp->ibqp); +- return; +- } ++ /* The qp won't be released until we release the ep. 
++ * the ep's refcnt was increased before calling this ++ * function, therefore it is safe to access qp ++ */ ++ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, ++ &qp->iwarp_cm_flags)) ++ goto out; + + memset(&event, 0, sizeof(event)); + event.status = dwork->status; +@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) + else + qp_params.new_state = QED_ROCE_QP_STATE_SQD; + +- kfree(dwork); + + if (ep->cm_id) + ep->cm_id->event_handler(ep->cm_id, &event); +@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) + + dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); + +- qedr_iw_qp_rem_ref(&qp->ibqp); ++ complete(&ep->qp->iwarp_cm_comp); ++out: ++ kfree(dwork); ++ kref_put(&ep->refcnt, qedr_iw_free_ep); + } + + static void +@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context, + struct qedr_discon_work *work; + struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; + struct qedr_dev *dev = ep->dev; +- struct qedr_qp *qp = ep->qp; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + +- qedr_iw_qp_add_ref(&qp->ibqp); ++ /* We can't get a close event before disconnect, but since ++ * we're scheduling a work queue we need to make sure close ++ * won't delete the ep, so we increase the refcnt ++ */ ++ kref_get(&ep->refcnt); ++ + work->ep = ep; + work->event = params->event; + work->status = params->status; +@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context, + if ((params->status == -ECONNREFUSED) && (!ep->qp)) { + DP_DEBUG(dev, QEDR_MSG_IWARP, + "PASSIVE connection refused releasing ep...\n"); +- kfree(ep); ++ kref_put(&ep->refcnt, qedr_iw_free_ep); + return; + } + ++ complete(&ep->qp->iwarp_cm_comp); + qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED); + + if (params->status < 0) + qedr_iw_close_event(context, params); + } + ++static void ++qedr_iw_active_complete(void *context, ++ struct qed_iwarp_cm_event_params *params) ++{ ++ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; ++ ++ complete(&ep->qp->iwarp_cm_comp); ++ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY); ++ ++ if (params->status < 0) ++ kref_put(&ep->refcnt, qedr_iw_free_ep); ++} ++ + static int + qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params) + { +@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params) + qedr_iw_mpa_reply(context, params); + break; + case QED_IWARP_EVENT_PASSIVE_COMPLETE: +- ep->during_connect = 0; + qedr_iw_passive_complete(context, params); + break; +- + case QED_IWARP_EVENT_ACTIVE_COMPLETE: +- ep->during_connect = 0; +- qedr_iw_issue_event(context, +- params, +- IW_CM_EVENT_CONNECT_REPLY); +- if (params->status < 0) { +- struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; +- +- ep->cm_id->rem_ref(ep->cm_id); +- ep->cm_id = NULL; +- } ++ qedr_iw_active_complete(context, params); + break; + case QED_IWARP_EVENT_DISCONNECT: + qedr_iw_disconnect_event(context, params); + break; + case QED_IWARP_EVENT_CLOSE: +- ep->during_connect = 0; + qedr_iw_close_event(context, params); + break; + case QED_IWARP_EVENT_RQ_EMPTY: +@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev, + return rc; + } + ++struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn) ++{ ++ struct qedr_qp *qp; ++ ++ xa_lock(&dev->qps); ++ qp = xa_load(&dev->qps, qpn); ++ if (qp) ++ kref_get(&qp->refcnt); ++ xa_unlock(&dev->qps); ++ ++ return qp; ++} ++ + int qedr_iw_connect(struct iw_cm_id *cm_id, struct 
iw_cm_conn_param *conn_param) + { + struct qedr_dev *dev = get_qedr_dev(cm_id->device); +@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + int rc = 0; + int i; + +- qp = xa_load(&dev->qps, conn_param->qpn); +- if (unlikely(!qp)) +- return -EINVAL; +- + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; +@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + return -ENOMEM; + + ep->dev = dev; ++ kref_init(&ep->refcnt); ++ ++ qp = qedr_iw_load_qp(dev, conn_param->qpn); ++ if (!qp) { ++ rc = -EINVAL; ++ goto err; ++ } ++ + ep->qp = qp; +- qp->ep = ep; + cm_id->add_ref(cm_id); + ep->cm_id = cm_id; + +@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + in_params.qp = qp->qed_qp; + memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN); + +- ep->during_connect = 1; ++ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, ++ &qp->iwarp_cm_flags)) ++ goto err; /* QP already being destroyed */ ++ + rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params); +- if (rc) ++ if (rc) { ++ complete(&qp->iwarp_cm_comp); + goto err; ++ } + + return rc; + + err: +- cm_id->rem_ref(cm_id); +- kfree(ep); ++ kref_put(&ep->refcnt, qedr_iw_free_ep); + return rc; + } + +@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + struct qedr_dev *dev = ep->dev; + struct qedr_qp *qp; + struct qed_iwarp_accept_in params; +- int rc; ++ int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn); + +- qp = xa_load(&dev->qps, conn_param->qpn); ++ qp = qedr_iw_load_qp(dev, conn_param->qpn); + if (!qp) { + DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn); + return -EINVAL; + } + + ep->qp = qp; +- qp->ep = ep; + cm_id->add_ref(cm_id); + ep->cm_id = cm_id; + +@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + params.ird = conn_param->ird; + params.ord = conn_param->ord; + +- ep->during_connect = 1; ++ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, ++ &qp->iwarp_cm_flags)) ++ goto err; /* QP already destroyed */ ++ + rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params); +- if (rc) ++ if (rc) { ++ complete(&qp->iwarp_cm_comp); + goto err; ++ } + + return rc; ++ + err: +- ep->during_connect = 0; +- cm_id->rem_ref(cm_id); ++ kref_put(&ep->refcnt, qedr_iw_free_ep); ++ + return rc; + } + +@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp) + { + struct qedr_qp *qp = get_qedr_qp(ibqp); + +- atomic_inc(&qp->refcnt); ++ kref_get(&qp->refcnt); + } + + void qedr_iw_qp_rem_ref(struct ib_qp *ibqp) + { + struct qedr_qp *qp = get_qedr_qp(ibqp); + +- if (atomic_dec_and_test(&qp->refcnt)) { +- xa_erase_irq(&qp->dev->qps, qp->qp_id); +- kfree(qp); +- } ++ kref_put(&qp->refcnt, qedr_iw_free_qp); + } + + struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn) +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c +index a7ccca3c4f89..8b4240c1cc76 100644 +--- a/drivers/infiniband/hw/qedr/verbs.c ++++ b/drivers/infiniband/hw/qedr/verbs.c +@@ -51,6 +51,7 @@ + #include "verbs.h" + #include + #include "qedr_roce_cm.h" ++#include "qedr_iw_cm.h" + + #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm) + #define RDMA_MAX_SGE_PER_SRQ (4) +@@ -1193,7 +1194,10 @@ static void
qedr_set_common_qp_params(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs) + { + spin_lock_init(&qp->q_lock); +- atomic_set(&qp->refcnt, 1); ++ if (rdma_protocol_iwarp(&dev->ibdev, 1)) { ++ kref_init(&qp->refcnt); ++ init_completion(&qp->iwarp_cm_comp); ++ } + qp->pd = pd; + qp->qp_type = attrs->qp_type; + qp->max_inline_data = attrs->cap.max_inline_data; +@@ -1600,6 +1604,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, + int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1); + int rc = -EINVAL; + ++ qp->create_type = QEDR_QP_CREATE_USER; + memset(&ureq, 0, sizeof(ureq)); + rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq)); + if (rc) { +@@ -1813,6 +1818,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev, + u32 n_sq_entries; + + memset(&in_params, 0, sizeof(in_params)); ++ qp->create_type = QEDR_QP_CREATE_KERNEL; + + /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in + * the ring. The ring should allow at least a single WR, even if the +@@ -1926,7 +1932,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, + qp->ibqp.qp_num = qp->qp_id; + + if (rdma_protocol_iwarp(&dev->ibdev, 1)) { +- rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL); ++ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL); + if (rc) + goto err; + } +@@ -2445,7 +2451,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, + return rc; + } + +- if (udata) ++ if (qp->create_type == QEDR_QP_CREATE_USER) + qedr_cleanup_user(dev, qp); + else + qedr_cleanup_kernel(dev, qp); +@@ -2475,34 +2481,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) + qedr_modify_qp(ibqp, &attr, attr_mask, NULL); + } + } else { +- /* Wait for the connect/accept to complete */ +- if (qp->ep) { +- int wait_count = 1; +- +- while (qp->ep->during_connect) { +- DP_DEBUG(dev, QEDR_MSG_QP, +- "Still in during connect/accept\n"); +- +- msleep(100); +- if (wait_count++ > 200) { +- DP_NOTICE(dev, +- "during connect timeout\n"); +- break; +- } +- } +- } ++ /* If connection establishment started the WAIT_FOR_CONNECT ++ * bit will be on and we need to Wait for the establishment ++ * to complete before destroying the qp. ++ */ ++ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, ++ &qp->iwarp_cm_flags)) ++ wait_for_completion(&qp->iwarp_cm_comp); ++ ++ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT ++ * bit will be on, and we need to wait for the disconnect to ++ * complete before continuing. We can use the same completion, ++ * iwarp_cm_comp, since this is the only place that waits for ++ * this completion and it is sequential. In addition, ++ * disconnect can't occur before the connection is fully ++ * established, therefore if WAIT_FOR_DISCONNECT is on it ++ * means WAIT_FOR_CONNECT is also on and the completion for ++ * CONNECT already occurred. 
++ */ ++ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, ++ &qp->iwarp_cm_flags)) ++ wait_for_completion(&qp->iwarp_cm_comp); + } + + if (qp->qp_type == IB_QPT_GSI) + qedr_destroy_gsi_qp(dev); + ++ /* We need to remove the entry from the xarray before we release the ++ * qp_id to avoid a race of the qp_id being reallocated and failing ++ * on xa_insert ++ */ ++ if (rdma_protocol_iwarp(&dev->ibdev, 1)) ++ xa_erase(&dev->qps, qp->qp_id); ++ + qedr_free_qp_resources(dev, qp, udata); + +- if (atomic_dec_and_test(&qp->refcnt) && +- rdma_protocol_iwarp(&dev->ibdev, 1)) { +- xa_erase_irq(&dev->qps, qp->qp_id); +- kfree(qp); +- } ++ if (rdma_protocol_iwarp(&dev->ibdev, 1)) ++ qedr_iw_qp_rem_ref(&qp->ibqp); ++ + return 0; + } + +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index 6027bb65f7f6..dc9a3bb24114 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -818,10 +818,15 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) + PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS)); + + /* Trapped into security mode allows packet forwarding through VLAN +- * table lookup. ++ * table lookup. CPU port is set to fallback mode to let untagged ++ * frames pass through. + */ +- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, +- MT7530_PORT_SECURITY_MODE); ++ if (dsa_is_cpu_port(ds, port)) ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_FALLBACK_MODE); ++ else ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_SECURITY_MODE); + + /* Set the port as a user port which is to be able to recognize VID + * from incoming packets before fetching entry within the VLAN table. +diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h +index 0e7e36d8f994..3ef7b5a6fc22 100644 +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -148,6 +148,12 @@ enum mt7530_port_mode { + /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */ + MT7530_PORT_MATRIX_MODE = PORT_VLAN(0), + ++ /* Fallback Mode: Forward received frames with ingress ports that do ++ * not belong to the VLAN member. Frames whose VID is not listed on ++ * the VLAN table are forwarded by the PCR_MATRIX members. ++ */ ++ MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1), ++ + /* Security Mode: Discard any frame due to ingress membership + * violation or VID missed on the VLAN table. + */ +diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c +index a58185b1d8bf..3e3711b60d01 100644 +--- a/drivers/net/ethernet/apple/bmac.c ++++ b/drivers/net/ethernet/apple/bmac.c +@@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea) + int i; + unsigned short data; + +- for (i = 0; i < 6; i++) ++ for (i = 0; i < 3; i++) + { + reset_and_select_srom(dev); + data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index f839fa94ebdd..d3b8ce734c1b 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #include "ucc_geth.h" + +@@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) + + static void ugeth_quiesce(struct ucc_geth_private *ugeth) + { +- /* Prevent any further xmits, plus detach the device. */ +- netif_device_detach(ugeth->ndev); +- +- /* Wait for any current xmits to finish. 
*/ +- netif_tx_disable(ugeth->ndev); ++ /* Prevent any further xmits */ ++ netif_tx_stop_all_queues(ugeth->ndev); + + /* Disable the interrupt to avoid NAPI rescheduling. */ + disable_irq(ugeth->ug_info->uf_info.irq); +@@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) + { + napi_enable(&ugeth->napi); + enable_irq(ugeth->ug_info->uf_info.irq); +- netif_device_attach(ugeth->ndev); ++ ++ /* allow to xmit again */ ++ netif_tx_wake_all_queues(ugeth->ndev); ++ __netdev_watchdog_up(ugeth->ndev); + } + + /* Called every time the controller might need to be made +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c +index 38068fc34141..c7bdada4d1b9 100644 +--- a/drivers/net/ethernet/smsc/smsc911x.c ++++ b/drivers/net/ethernet/smsc/smsc911x.c +@@ -2502,20 +2502,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev) + + retval = smsc911x_init(dev); + if (retval < 0) +- goto out_disable_resources; ++ goto out_init_fail; + + netif_carrier_off(dev); + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); +- goto out_disable_resources; ++ goto out_init_fail; + } + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); +- goto out_disable_resources; ++ goto out_init_fail; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: \"%s\"", dev->name); +@@ -2556,9 +2556,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) + + return 0; + +-out_disable_resources: ++out_init_fail: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); ++out_disable_resources: + (void)smsc911x_disable_resources(pdev); + out_enable_resources_fail: + smsc911x_free_resources(pdev); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +index 0d21082ceb93..4d75158c64b2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +@@ -318,6 +318,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) + /* Enable PTP clock */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); ++ switch (gmac->phy_mode) { ++ case PHY_INTERFACE_MODE_RGMII: ++ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | ++ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); ++ break; ++ case PHY_INTERFACE_MODE_SGMII: ++ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | ++ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); ++ break; ++ default: ++ /* We don't get here; the switch above will have errored out */ ++ unreachable(); ++ } + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { +diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c +index c4c8f1b62e1e..da0d3834b5f0 100644 +--- a/drivers/net/wireless/cisco/airo.c ++++ b/drivers/net/wireless/cisco/airo.c +@@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!",__func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + npacks = skb_queue_len (&ai->txq); + + if (npacks >= MAXTXQ - 1) { +@@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!", __func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { 
++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + + /* Find a vacant FID */ + for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); +@@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, + airo_print_err(dev->name, "%s: skb == NULL!", __func__); + return NETDEV_TX_OK; + } ++ if (skb_padto(skb, ETH_ZLEN)) { ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + + /* Find a vacant FID */ + for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); +diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c +index b94764c88750..ff0e30c0c14c 100644 +--- a/drivers/net/wireless/intersil/p54/p54usb.c ++++ b/drivers/net/wireless/intersil/p54/p54usb.c +@@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = { + {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ + {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ + {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ ++ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */ + {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ + {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ + {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h +index e858bba8c8ff..0075fba93546 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h +@@ -212,6 +212,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev) + static inline bool is_mt76x2(struct mt76x02_dev *dev) + { + return mt76_chip(&dev->mt76) == 0x7612 || ++ mt76_chip(&dev->mt76) == 0x7632 || + mt76_chip(&dev->mt76) == 0x7662 || + mt76_chip(&dev->mt76) == 0x7602; + } +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +index 8b26c6108186..96a2b7ba6764 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +@@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { + { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ + { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ + { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ ++ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */ + { }, + }; + +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c +index 849335d76cf6..6f4692f0d714 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c +@@ -974,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) + struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; +- struct hisi_sas_port *port = to_hisi_sas_port(sas_port); ++ struct hisi_sas_port *port; + unsigned long flags; + + if (!sas_port) + return; + ++ port = to_hisi_sas_port(sas_port); + spin_lock_irqsave(&hisi_hba->lock, flags); + port->port_attached = 1; + port->id = phy->port_id; +diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c +index 3717eea37ecb..5f0ad8b32e3a 100644 +--- a/drivers/scsi/scsi_pm.c ++++ b/drivers/scsi/scsi_pm.c +@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev, + dev_dbg(dev, "scsi resume: %d\n", err); + + if (err == 0) { ++ bool was_runtime_suspended; ++ ++ was_runtime_suspended = pm_runtime_suspended(dev); 
++ + pm_runtime_disable(dev); + err = pm_runtime_set_active(dev); + pm_runtime_enable(dev); +@@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev, + */ + if (!err && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); +- +- blk_set_runtime_active(sdev->request_queue); ++ if (was_runtime_suspended) ++ blk_post_runtime_resume(sdev->request_queue, 0); ++ else ++ blk_set_runtime_active(sdev->request_queue); + } + } + +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 13ab1494c384..bc73181b0405 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -2480,6 +2480,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) + + err = ufshcd_map_sg(hba, lrbp); + if (err) { ++ ufshcd_release(hba); + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); + goto out; +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index 11cac7e10663..d2ca3b357cfe 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master, + dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); + ++ /* Ensure dw->rx and dw->rx_end are visible */ ++ smp_mb(); ++ + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed */ +diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h +index 0b1cb9f9cbd1..1bfa8c86132a 100644 +--- a/drivers/staging/media/ipu3/include/intel-ipu3.h ++++ b/drivers/staging/media/ipu3/include/intel-ipu3.h +@@ -450,7 +450,7 @@ struct ipu3_uapi_awb_fr_config_s { + __u32 bayer_sign; + __u8 bayer_nf; + __u8 reserved2[7]; +-} __attribute__((aligned(32))) __packed; ++} __packed; + + /** + * struct ipu3_uapi_4a_config - 4A config +@@ -466,7 +466,8 @@ struct ipu3_uapi_4a_config { + struct ipu3_uapi_ae_grid_config ae_grd_config; + __u8 padding[20]; + struct ipu3_uapi_af_config_s af_config; +- struct ipu3_uapi_awb_fr_config_s awb_fr_config; ++ struct ipu3_uapi_awb_fr_config_s awb_fr_config ++ __attribute__((aligned(32))); + } __packed; + + /** +@@ -2472,7 +2473,7 @@ struct ipu3_uapi_acc_param { + struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32))); + struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32))); + struct ipu3_uapi_anr_config anr; +- struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32))); ++ struct ipu3_uapi_awb_fr_config_s awb_fr; + struct ipu3_uapi_ae_config ae; + struct ipu3_uapi_af_config_s af; + struct ipu3_uapi_awb_config awb; +diff --git a/fs/io_uring.c b/fs/io_uring.c +index b2ccb908f6b6..2050100e6e84 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -409,6 +409,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) + } + + ctx->flags = p->flags; ++ init_waitqueue_head(&ctx->sqo_wait); + init_waitqueue_head(&ctx->cq_wait); + init_completion(&ctx->ctx_done); + init_completion(&ctx->sqo_thread_started); +@@ -3237,7 +3238,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, + { + int ret; + +- init_waitqueue_head(&ctx->sqo_wait); + mmgrab(current->mm); + ctx->sqo_mm = current->mm; + +diff --git a/include/linux/security.h b/include/linux/security.h +index 9df7547afc0c..fd022768e91d 100644 +--- a/include/linux/security.h ++++ b/include/linux/security.h +@@ -117,12 +117,14 @@ enum lockdown_reason { + LOCKDOWN_MODULE_PARAMETERS, + LOCKDOWN_MMIOTRACE, + LOCKDOWN_DEBUGFS, ++ LOCKDOWN_XMON_WR, + LOCKDOWN_INTEGRITY_MAX, + 
LOCKDOWN_KCORE, + LOCKDOWN_KPROBES, + LOCKDOWN_BPF_READ, + LOCKDOWN_PERF, + LOCKDOWN_TRACEFS, ++ LOCKDOWN_XMON_RW, + LOCKDOWN_CONFIDENTIALITY_MAX, + }; + +diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h +index 00c08120f3ba..27a39847d55c 100644 +--- a/include/uapi/linux/mmc/ioctl.h ++++ b/include/uapi/linux/mmc/ioctl.h +@@ -3,6 +3,7 @@ + #define LINUX_MMC_IOCTL_H + + #include ++#include + + struct mmc_ioc_cmd { + /* +diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c +index ca19b4c8acf5..4a942d4e9763 100644 +--- a/kernel/cgroup/rstat.c ++++ b/kernel/cgroup/rstat.c +@@ -33,12 +33,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) + return; + + /* +- * Paired with the one in cgroup_rstat_cpu_pop_upated(). Either we +- * see NULL updated_next or they see our updated stat. +- */ +- smp_mb(); +- +- /* ++ * Speculative already-on-list test. This may race leading to ++ * temporary inaccuracies, which is fine. ++ * + * Because @parent's updated_children is terminated with @parent + * instead of NULL, we can tell whether @cgrp is on the list by + * testing the next pointer for NULL. +@@ -134,13 +131,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos, + *nextp = rstatc->updated_next; + rstatc->updated_next = NULL; + +- /* +- * Paired with the one in cgroup_rstat_cpu_updated(). +- * Either they see NULL updated_next or we see their +- * updated stat. +- */ +- smp_mb(); +- + return pos; + } + +diff --git a/kernel/relay.c b/kernel/relay.c +index ade14fb7ce2e..4b760ec16342 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -581,6 +581,11 @@ struct rchan *relay_open(const char *base_filename, + return NULL; + + chan->buf = alloc_percpu(struct rchan_buf *); ++ if (!chan->buf) { ++ kfree(chan); ++ return NULL; ++ } ++ + chan->version = RELAYFS_CHANNEL_VERSION; + chan->n_subbufs = n_subbufs; + chan->subbuf_size = subbuf_size; +diff --git a/mm/mremap.c b/mm/mremap.c +index 245bf9c555b2..8005d0b2b843 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -266,7 +266,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, + new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); + if (!new_pmd) + break; +- if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) { ++ if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { + if (extent == HPAGE_PMD_SIZE) { + bool moved; + /* See comment in move_ptes() */ +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index cc826c2767a3..fbc2ee6d46fc 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -209,7 +209,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + data->hdr.length = crypto_shash_digestsize(desc->tfm); + + error = -ENODATA; +- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { ++ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { + bool is_ima = false; + + if (strcmp(xattr->name, XATTR_NAME_IMA) == 0) +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c +index f9a81b187fae..a2c393385db0 100644 +--- a/security/integrity/evm/evm_main.c ++++ b/security/integrity/evm/evm_main.c +@@ -99,7 +99,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry) + if (!(inode->i_opflags & IOP_XATTR)) + return -EOPNOTSUPP; + +- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { ++ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { + error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0); + if 
(error < 0) { + if (error == -ENODATA) +@@ -230,7 +230,7 @@ static int evm_protected_xattr(const char *req_xattr_name) + struct xattr_list *xattr; + + namelen = strlen(req_xattr_name); +- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { ++ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { + if ((strlen(xattr->name) == namelen) + && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) { + found = 1; +diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c +index c11c1f7b3ddd..0f37ef27268d 100644 +--- a/security/integrity/evm/evm_secfs.c ++++ b/security/integrity/evm/evm_secfs.c +@@ -234,7 +234,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf, + goto out; + } + +- /* Guard against races in evm_read_xattrs */ ++ /* ++ * xattr_list_mutex guards against races in evm_read_xattrs(). ++ * Entries are only added to the evm_config_xattrnames list ++ * and never deleted. Therefore, the list is traversed ++ * using list_for_each_entry_lockless() without holding ++ * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs() ++ * and evm_protected_xattr(). ++ */ + mutex_lock(&xattr_list_mutex); + list_for_each_entry(tmp, &evm_config_xattrnames, list) { + if (strcmp(xattr->name, tmp->name) == 0) { +diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c +index 40b790536def..b2f87015d6e9 100644 +--- a/security/lockdown/lockdown.c ++++ b/security/lockdown/lockdown.c +@@ -32,12 +32,14 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { + [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", + [LOCKDOWN_MMIOTRACE] = "unsafe mmio", + [LOCKDOWN_DEBUGFS] = "debugfs access", ++ [LOCKDOWN_XMON_WR] = "xmon write access", + [LOCKDOWN_INTEGRITY_MAX] = "integrity", + [LOCKDOWN_KCORE] = "/proc/kcore access", + [LOCKDOWN_KPROBES] = "use of kprobes", + [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", + [LOCKDOWN_PERF] = "unsafe use of perf", + [LOCKDOWN_TRACEFS] = "use of tracefs", ++ [LOCKDOWN_XMON_RW] = "xmon read and write access", + [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", + }; + +diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +index 67b276a65a8d..8ad31c91fc75 100644 +--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c ++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +@@ -626,7 +626,7 @@ static int kabylake_card_late_probe(struct snd_soc_card *card) + * kabylake audio machine driver for MAX98927 + RT5514 + RT5663 + */ + static struct snd_soc_card kabylake_audio_card = { +- .name = "kbl_r5514_5663_max", ++ .name = "kbl-r5514-5663-max", + .owner = THIS_MODULE, + .dai_link = kabylake_dais, + .num_links = ARRAY_SIZE(kabylake_dais), +diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c +index 1778acdc367c..e8d676c192f6 100644 +--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c ++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c +@@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link) + } + + static struct snd_soc_card hda_soc_card = { +- .name = "skl_hda_card", ++ .name = "hda-dsp", + .owner = THIS_MODULE, + .dai_link = skl_hda_be_dai_links, + .dapm_widgets = skl_hda_widgets, +diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c +index 06b7d6c6c9a0..302ca1920791 100644 +--- a/sound/soc/intel/boards/sof_rt5682.c ++++ 
b/sound/soc/intel/boards/sof_rt5682.c +@@ -374,7 +374,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd) + + /* sof audio machine driver for rt5682 codec */ + static struct snd_soc_card sof_audio_card_rt5682 = { +- .name = "sof_rt5682", ++ .name = "rt5682", /* the sof- prefix is added by the core */ + .owner = THIS_MODULE, + .controls = sof_controls, + .num_controls = ARRAY_SIZE(sof_controls), +diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +index 24dd8ed48580..b025daea062d 100755 +--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh ++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +@@ -300,7 +300,7 @@ test_uc_aware() + local i + + for ((i = 0; i < attempts; ++i)); do +- if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then ++ if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then + ((passes++)) + fi + diff --git a/patch/kernel/odroidxu4-current/patch-5.4.45-46.patch b/patch/kernel/odroidxu4-current/patch-5.4.45-46.patch new file mode 100644 index 000000000..d5b492dd8 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.45-46.patch @@ -0,0 +1,1598 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index fc20cde63d1e..c24afa60a30e 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -486,6 +486,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds ++ /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + Date: January 2018 +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +index 0795e3c2643f..ca4dbdd9016d 100644 +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -14,3 +14,4 @@ are configurable at compile, boot or run time. + mds + tsx_async_abort + multihit.rst ++ special-register-buffer-data-sampling.rst +diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst +new file mode 100644 +index 000000000000..47b1b3afac99 +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst +@@ -0,0 +1,149 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++SRBDS - Special Register Buffer Data Sampling ++============================================= ++ ++SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to ++infer values returned from special register accesses. Special register ++accesses are accesses to off core registers. According to Intel's evaluation, ++the special register reads that have a security expectation of privacy are ++RDRAND, RDSEED and SGX EGETKEY. ++ ++When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved ++to the core through the special register mechanism that is susceptible ++to MDS attacks. ++ ++Affected processors ++-------------------- ++Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may ++be affected. 
++ ++A processor is affected by SRBDS if its Family_Model and stepping is ++in the following list, with the exception of the listed processors ++exporting MDS_NO while Intel TSX is available yet not enabled. The ++latter class of processors are only affected when Intel TSX is enabled ++by software using TSX_CTRL_MSR otherwise they are not affected. ++ ++ ============= ============ ======== ++ common name Family_Model Stepping ++ ============= ============ ======== ++ IvyBridge 06_3AH All ++ ++ Haswell 06_3CH All ++ Haswell_L 06_45H All ++ Haswell_G 06_46H All ++ ++ Broadwell_G 06_47H All ++ Broadwell 06_3DH All ++ ++ Skylake_L 06_4EH All ++ Skylake 06_5EH All ++ ++ Kabylake_L 06_8EH <= 0xC ++ Kabylake 06_9EH <= 0xD ++ ============= ============ ======== ++ ++Related CVEs ++------------ ++ ++The following CVE entry is related to this SRBDS issue: ++ ++ ============== ===== ===================================== ++ CVE-2020-0543 SRBDS Special Register Buffer Data Sampling ++ ============== ===== ===================================== ++ ++Attack scenarios ++---------------- ++An unprivileged user can extract values returned from RDRAND and RDSEED ++executed on another core or sibling thread using MDS techniques. ++ ++ ++Mitigation mechanism ++------------------- ++Intel will release microcode updates that modify the RDRAND, RDSEED, and ++EGETKEY instructions to overwrite secret special register data in the shared ++staging buffer before the secret data can be accessed by another logical ++processor. ++ ++During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core ++accesses from other logical processors will be delayed until the special ++register read is complete and the secret data in the shared staging buffer is ++overwritten. ++ ++This has three effects on performance: ++ ++#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. ++ ++#. Executing RDRAND at the same time on multiple logical processors will be ++ serialized, resulting in an overall reduction in the maximum RDRAND ++ bandwidth. ++ ++#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other ++ logical processors that miss their core caches, with an impact similar to ++ legacy locked cache-line-split accesses. ++ ++The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable ++the mitigation for RDRAND and RDSEED instructions executed outside of Intel ++Software Guard Extensions (Intel SGX) enclaves. On logical processors that ++disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not ++take longer to execute and do not impact performance of sibling logical ++processors memory accesses. The opt-out mechanism does not affect Intel SGX ++enclaves (including execution of RDRAND or RDSEED inside an enclave, as well ++as EGETKEY execution). ++ ++IA32_MCU_OPT_CTRL MSR Definition ++-------------------------------- ++Along with the mitigation for this issue, Intel added a new thread-scope ++IA32_MCU_OPT_CTRL MSR, (address 0x123). The presence of this MSR and ++RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL = ++9]==1. This MSR is introduced through the microcode update. ++ ++Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor ++disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX ++enclave on that logical processor. Opting out of the mitigation for a ++particular logical processor does not affect the RDRAND and RDSEED mitigations ++for other logical processors. 
++ ++Note that inside of an Intel SGX enclave, the mitigation is applied regardless ++of the value of RNGDS_MITG_DS. ++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++The kernel command line allows control over the SRBDS mitigation at boot time ++with the option "srbds=". The option for this is: ++ ++ ============= ============================================================= ++ off This option disables SRBDS mitigation for RDRAND and RDSEED on ++ affected platforms. ++ ============= ============================================================= ++ ++SRBDS System Information ++----------------------- ++The Linux kernel provides vulnerability status information through sysfs. For ++SRBDS this can be accessed by the following sysfs file: ++/sys/devices/system/cpu/vulnerabilities/srbds ++ ++The possible values contained in this file are: ++ ++ ============================== ============================================= ++ Not affected Processor not vulnerable ++ Vulnerable Processor vulnerable and mitigation disabled ++ Vulnerable: No microcode Processor vulnerable and microcode is missing ++ mitigation ++ Mitigation: Microcode Processor is vulnerable and mitigation is in ++ effect. ++ Mitigation: TSX disabled Processor is only vulnerable when TSX is ++ enabled while this system was booted with TSX ++ disabled. ++ Unknown: Dependent on ++ hypervisor status Running on virtual guest processor that is ++ affected but with no way to know if host ++ processor is mitigated or vulnerable. ++ ============================== ============================================= ++ ++SRBDS Default mitigation ++------------------------ ++This new microcode serializes processor access during execution of RDRAND, ++RDSEED ensures that the shared buffer is overwritten before it is released for ++reuse. Use the "srbds=off" kernel command line to disable the mitigation for ++RDRAND and RDSEED. +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 1f77c99e2cba..13984b6cc322 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -4579,6 +4579,26 @@ + spia_pedr= + spia_peddr= + ++ srbds= [X86,INTEL] ++ Control the Special Register Buffer Data Sampling ++ (SRBDS) mitigation. ++ ++ Certain CPUs are vulnerable to an MDS-like ++ exploit which can leak bits from the random ++ number generator. ++ ++ By default, this issue is mitigated by ++ microcode. However, the microcode fix can cause ++ the RDRAND and RDSEED instructions to become ++ much slower. Among other effects, this will ++ result in reduced throughput from /dev/urandom. 
++ ++ The microcode mitigation can be disabled with ++ the following option: ++ ++ off: Disable mitigation and remove ++ performance impact to RDRAND and RDSEED ++ + srcutree.counter_wrap_check [KNL] + Specifies how frequently to check for + grace-period sequence counter wrap for the +diff --git a/Makefile b/Makefile +index d57c443d9073..4835d6734c3f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 45 ++SUBLEVEL = 46 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h +index 31c379c1da41..0c814cd9ea42 100644 +--- a/arch/x86/include/asm/cpu_device_id.h ++++ b/arch/x86/include/asm/cpu_device_id.h +@@ -9,6 +9,36 @@ + + #include + ++#define X86_CENTAUR_FAM6_C7_D 0xd ++#define X86_CENTAUR_FAM6_NANO 0xf ++ ++#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins) ++ ++/** ++ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching ++ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY ++ * The name is expanded to X86_VENDOR_@_vendor ++ * @_family: The family number or X86_FAMILY_ANY ++ * @_model: The model number, model constant or X86_MODEL_ANY ++ * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY ++ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY ++ * @_data: Driver specific data or NULL. The internal storage ++ * format is unsigned long. The supplied value, pointer ++ * etc. is casted to unsigned long internally. ++ * ++ * Backport version to keep the SRBDS pile consistant. No shorter variants ++ * required for this. ++ */ ++#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ ++ _steppings, _feature, _data) { \ ++ .vendor = X86_VENDOR_##_vendor, \ ++ .family = _family, \ ++ .model = _model, \ ++ .steppings = _steppings, \ ++ .feature = _feature, \ ++ .driver_data = (unsigned long) _data \ ++} ++ + /* + * Match specific microcode revisions. 
+ * +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index c4fbe379cc0b..d912457f56a7 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -357,6 +357,7 @@ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ + #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ ++#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ + #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ + #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ +@@ -401,5 +402,6 @@ + #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ + #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ ++#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 1682e4b5ce75..391812e0384e 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -119,6 +119,10 @@ + #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ + #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ + ++/* SRBDS support */ ++#define MSR_IA32_MCU_OPT_CTRL 0x00000123 ++#define RNGDS_MITG_DIS BIT(0) ++ + #define MSR_IA32_SYSENTER_CS 0x00000174 + #define MSR_IA32_SYSENTER_ESP 0x00000175 + #define MSR_IA32_SYSENTER_EIP 0x00000176 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 8bf64899f56a..3c3f3e02683a 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); + static void __init mds_print_mitigation(void); + static void __init taa_select_mitigation(void); ++static void __init srbds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ + u64 x86_spec_ctrl_base; +@@ -108,6 +109,7 @@ void __init check_bugs(void) + l1tf_select_mitigation(); + mds_select_mitigation(); + taa_select_mitigation(); ++ srbds_select_mitigation(); + + /* + * As MDS and TAA mitigations are inter-related, print MDS +@@ -390,6 +392,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str) + } + early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + ++#undef pr_fmt ++#define pr_fmt(fmt) "SRBDS: " fmt ++ ++enum srbds_mitigations { ++ SRBDS_MITIGATION_OFF, ++ SRBDS_MITIGATION_UCODE_NEEDED, ++ SRBDS_MITIGATION_FULL, ++ SRBDS_MITIGATION_TSX_OFF, ++ SRBDS_MITIGATION_HYPERVISOR, ++}; ++ ++static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; ++ ++static const char * const srbds_strings[] = { ++ [SRBDS_MITIGATION_OFF] = "Vulnerable", ++ [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", ++ [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", ++ [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", ++ [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", ++}; ++ ++static bool srbds_off; ++ ++void update_srbds_msr(void) ++{ ++ u64 mcu_ctrl; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return; ++ ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) ++ return; ++ ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ ++ switch (srbds_mitigation) { ++ case SRBDS_MITIGATION_OFF: ++ case SRBDS_MITIGATION_TSX_OFF: ++ mcu_ctrl |= RNGDS_MITG_DIS; ++ break; ++ case SRBDS_MITIGATION_FULL: ++ mcu_ctrl &= ~RNGDS_MITG_DIS; ++ break; ++ default: ++ break; ++ } ++ ++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++} ++ ++static void __init srbds_select_mitigation(void) ++{ ++ u64 ia32_cap; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return; ++ ++ /* ++ * Check to see if this is one of the MDS_NO systems supporting ++ * TSX that are only exposed to SRBDS when TSX is enabled. 
++ */ ++ ia32_cap = x86_read_arch_cap_msr(); ++ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) ++ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; ++ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; ++ else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) ++ srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; ++ else if (cpu_mitigations_off() || srbds_off) ++ srbds_mitigation = SRBDS_MITIGATION_OFF; ++ ++ update_srbds_msr(); ++ pr_info("%s\n", srbds_strings[srbds_mitigation]); ++} ++ ++static int __init srbds_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!boot_cpu_has_bug(X86_BUG_SRBDS)) ++ return 0; ++ ++ srbds_off = !strcmp(str, "off"); ++ return 0; ++} ++early_param("srbds", srbds_parse_cmdline); ++ + #undef pr_fmt + #define pr_fmt(fmt) "Spectre V1 : " fmt + +@@ -1521,6 +1614,11 @@ static char *ibpb_state(void) + return ""; + } + ++static ssize_t srbds_show_state(char *buf) ++{ ++ return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -1565,6 +1663,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_ITLB_MULTIHIT: + return itlb_multihit_show_state(buf); + ++ case X86_BUG_SRBDS: ++ return srbds_show_state(buf); ++ + default: + break; + } +@@ -1611,4 +1712,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr + { + return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); + } ++ ++ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 704caec136cf..650df6d21049 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1024,6 +1024,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + #define MSBDS_ONLY BIT(5) + #define NO_SWAPGS BIT(6) + #define NO_ITLB_MULTIHIT BIT(7) ++#define NO_SPECTRE_V2 BIT(8) + + #define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } +@@ -1085,12 +1086,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), ++ ++ /* Zhaoxin Family 7 */ ++ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2), ++ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2), ++ {} ++}; ++ ++#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ ++ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ ++ INTEL_FAM6_##model, steppings, \ ++ X86_FEATURE_ANY, issues) ++ ++#define SRBDS BIT(0) ++ ++static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { ++ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), ++ 
VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + {} + }; + +-static bool __init cpu_matches(unsigned long which) ++static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) + { +- const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); ++ const struct x86_cpu_id *m = x86_match_cpu(table); + + return m && !!(m->driver_data & which); + } +@@ -1110,29 +1136,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ +- if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) ++ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && ++ !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); + +- if (cpu_matches(NO_SPECULATION)) ++ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); +- setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + +- if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && ++ if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2); ++ ++ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && ++ !(ia32_cap & ARCH_CAP_SSB_NO) && + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { ++ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && ++ !(ia32_cap & ARCH_CAP_MDS_NO)) { + setup_force_cpu_bug(X86_BUG_MDS); +- if (cpu_matches(MSBDS_ONLY)) ++ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) + setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); + } + +- if (!cpu_matches(NO_SWAPGS)) ++ if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) + setup_force_cpu_bug(X86_BUG_SWAPGS); + + /* +@@ -1150,7 +1181,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) + setup_force_cpu_bug(X86_BUG_TAA); + +- if (cpu_matches(NO_MELTDOWN)) ++ /* ++ * SRBDS affects CPUs which support RDRAND or RDSEED and are listed ++ * in the vulnerability blacklist. ++ */ ++ if ((cpu_has(c, X86_FEATURE_RDRAND) || ++ cpu_has(c, X86_FEATURE_RDSEED)) && ++ cpu_matches(cpu_vuln_blacklist, SRBDS)) ++ setup_force_cpu_bug(X86_BUG_SRBDS); ++ ++ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + + /* Rogue Data Cache Load? No! 
*/ +@@ -1159,7 +1199,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + +- if (cpu_matches(NO_L1TF)) ++ if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) + return; + + setup_force_cpu_bug(X86_BUG_L1TF); +@@ -1597,6 +1637,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) + mtrr_ap_init(); + validate_apic_and_package_id(c); + x86_spec_ctrl_setup_ap(); ++ update_srbds_msr(); + } + + static __init int setup_noclflush(char *arg) +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h +index 38ab6e115eac..9d033693519a 100644 +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -77,6 +77,7 @@ extern void detect_ht(struct cpuinfo_x86 *c); + unsigned int aperfmperf_get_khz(int cpu); + + extern void x86_spec_ctrl_setup_ap(void); ++extern void update_srbds_msr(void); + + extern u64 x86_read_arch_cap_msr(void); + +diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c +index 6dd78d8235e4..2f163e6646b6 100644 +--- a/arch/x86/kernel/cpu/match.c ++++ b/arch/x86/kernel/cpu/match.c +@@ -34,13 +34,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) + const struct x86_cpu_id *m; + struct cpuinfo_x86 *c = &boot_cpu_data; + +- for (m = match; m->vendor | m->family | m->model | m->feature; m++) { ++ for (m = match; ++ m->vendor | m->family | m->model | m->steppings | m->feature; ++ m++) { + if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) + continue; + if (m->family != X86_FAMILY_ANY && c->x86 != m->family) + continue; + if (m->model != X86_MODEL_ANY && c->x86_model != m->model) + continue; ++ if (m->steppings != X86_STEPPING_ANY && ++ !(BIT(c->x86_stepping) & m->steppings)) ++ continue; + if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) + continue; + return m; +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 6265871a4af2..f00da44ae6fe 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -567,6 +567,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev, + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_srbds(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +@@ -575,6 +581,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); + static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); + static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); + static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); ++static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -585,6 +592,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_mds.attr, + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, ++ &dev_attr_srbds.attr, + NULL + }; + +diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c +index 93a096a91f8c..74f3a2be17a6 100644 +--- a/drivers/iio/adc/stm32-adc-core.c ++++ b/drivers/iio/adc/stm32-adc-core.c +@@ -65,12 +65,14 @@ struct stm32_adc_priv; + * @clk_sel: clock selection routine + * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) + * @has_syscfg: SYSCFG capability flags ++ * @num_irqs: number of interrupt lines + */ + struct 
stm32_adc_priv_cfg { + const struct stm32_adc_common_regs *regs; + int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *); + u32 max_clk_rate_hz; + unsigned int has_syscfg; ++ unsigned int num_irqs; + }; + + /** +@@ -372,21 +374,15 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, + struct device_node *np = pdev->dev.of_node; + unsigned int i; + +- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { ++ /* ++ * Interrupt(s) must be provided, depending on the compatible: ++ * - stm32f4/h7 shares a common interrupt line. ++ * - stm32mp1, has one line per ADC ++ */ ++ for (i = 0; i < priv->cfg->num_irqs; i++) { + priv->irq[i] = platform_get_irq(pdev, i); +- if (priv->irq[i] < 0) { +- /* +- * At least one interrupt must be provided, make others +- * optional: +- * - stm32f4/h7 shares a common interrupt. +- * - stm32mp1, has one line per ADC (either for ADC1, +- * ADC2 or both). +- */ +- if (i && priv->irq[i] == -ENXIO) +- continue; +- ++ if (priv->irq[i] < 0) + return priv->irq[i]; +- } + } + + priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0, +@@ -397,9 +393,7 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, + return -ENOMEM; + } + +- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { +- if (priv->irq[i] < 0) +- continue; ++ for (i = 0; i < priv->cfg->num_irqs; i++) { + irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler); + irq_set_handler_data(priv->irq[i], priv); + } +@@ -417,11 +411,8 @@ static void stm32_adc_irq_remove(struct platform_device *pdev, + irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq)); + irq_domain_remove(priv->domain); + +- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { +- if (priv->irq[i] < 0) +- continue; ++ for (i = 0; i < priv->cfg->num_irqs; i++) + irq_set_chained_handler(priv->irq[i], NULL); +- } + } + + static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv, +@@ -803,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { + .regs = &stm32f4_adc_common_regs, + .clk_sel = stm32f4_adc_clk_sel, + .max_clk_rate_hz = 36000000, ++ .num_irqs = 1, + }; + + static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { +@@ -810,6 +802,7 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { + .clk_sel = stm32h7_adc_clk_sel, + .max_clk_rate_hz = 36000000, + .has_syscfg = HAS_VBOOSTER, ++ .num_irqs = 1, + }; + + static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { +@@ -817,6 +810,7 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { + .clk_sel = stm32h7_adc_clk_sel, + .max_clk_rate_hz = 40000000, + .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, ++ .num_irqs = 2, + }; + + static const struct of_device_id stm32_adc_of_match[] = { +diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c +index 23c9ab252470..07bb90d72434 100644 +--- a/drivers/iio/chemical/pms7003.c ++++ b/drivers/iio/chemical/pms7003.c +@@ -73,6 +73,11 @@ struct pms7003_state { + struct pms7003_frame frame; + struct completion frame_ready; + struct mutex lock; /* must be held whenever state gets touched */ ++ /* Used to construct scan to push to the IIO buffer */ ++ struct { ++ u16 data[3]; /* PM1, PM2P5, PM10 */ ++ s64 ts; ++ } scan; + }; + + static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd) +@@ -104,7 +109,6 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct pms7003_state *state = iio_priv(indio_dev); + struct pms7003_frame *frame = &state->frame; +- u16 data[3 + 1 + 4]; /* 
PM1, PM2P5, PM10, padding, timestamp */ + int ret; + + mutex_lock(&state->lock); +@@ -114,12 +118,15 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p) + goto err; + } + +- data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET); +- data[PM2P5] = pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET); +- data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET); ++ state->scan.data[PM1] = ++ pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET); ++ state->scan.data[PM2P5] = ++ pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET); ++ state->scan.data[PM10] = ++ pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET); + mutex_unlock(&state->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, data, ++ iio_push_to_buffers_with_timestamp(indio_dev, &state->scan, + iio_get_time_ns(indio_dev)); + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c +index edbb956e81e8..c0845d892faa 100644 +--- a/drivers/iio/chemical/sps30.c ++++ b/drivers/iio/chemical/sps30.c +@@ -230,15 +230,18 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct sps30_state *state = iio_priv(indio_dev); + int ret; +- s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */ ++ struct { ++ s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ ++ s64 ts; ++ } scan; + + mutex_lock(&state->lock); +- ret = sps30_do_meas(state, data, 4); ++ ret = sps30_do_meas(state, scan.data, ARRAY_SIZE(scan.data)); + mutex_unlock(&state->lock); + if (ret) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, data, ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c +index e5b00a6611ac..7384a3ffcac4 100644 +--- a/drivers/iio/light/vcnl4000.c ++++ b/drivers/iio/light/vcnl4000.c +@@ -193,7 +193,6 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, + u8 rdy_mask, u8 data_reg, int *val) + { + int tries = 20; +- __be16 buf; + int ret; + + mutex_lock(&data->vcnl4000_lock); +@@ -220,13 +219,12 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, + goto fail; + } + +- ret = i2c_smbus_read_i2c_block_data(data->client, +- data_reg, sizeof(buf), (u8 *) &buf); ++ ret = i2c_smbus_read_word_swapped(data->client, data_reg); + if (ret < 0) + goto fail; + + mutex_unlock(&data->vcnl4000_lock); +- *val = be16_to_cpu(buf); ++ *val = ret; + + return 0; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 8d9aab45fd8e..86e6bbb57482 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -417,12 +417,6 @@ static void del_sw_ns(struct fs_node *node) + + static void del_sw_prio(struct fs_node *node) + { +- struct mlx5_flow_root_namespace *root_ns; +- struct mlx5_flow_namespace *ns; +- +- fs_get_obj(ns, node); +- root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); +- mutex_destroy(&root_ns->chain_lock); + kfree(node); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 7dcdda9ca351..e4a690128b3a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1554,6 +1554,22 @@ static void shutdown(struct pci_dev *pdev) + mlx5_pci_disable_device(dev); + } + ++static 
int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ struct mlx5_core_dev *dev = pci_get_drvdata(pdev); ++ ++ mlx5_unload_one(dev, false); ++ ++ return 0; ++} ++ ++static int mlx5_resume(struct pci_dev *pdev) ++{ ++ struct mlx5_core_dev *dev = pci_get_drvdata(pdev); ++ ++ return mlx5_load_one(dev, false); ++} ++ + static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, + { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ +@@ -1597,6 +1613,8 @@ static struct pci_driver mlx5_core_driver = { + .id_table = mlx5_core_pci_table, + .probe = init_one, + .remove = remove_one, ++ .suspend = mlx5_suspend, ++ .resume = mlx5_resume, + .shutdown = shutdown, + .err_handler = &mlx5_err_handler, + .sriov_configure = mlx5_core_sriov_configure, +diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c +index 987ae221f6be..4dd3f8a5a9b8 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c +@@ -1368,7 +1368,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, + ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); + priv->stats[ctx_id].pkts += pkts; + priv->stats[ctx_id].bytes += bytes; +- max_t(u64, priv->stats[ctx_id].used, used); ++ priv->stats[ctx_id].used = max_t(u64, used, ++ priv->stats[ctx_id].used); + } + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 1623516efb17..982be75fde83 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; +- ts_event_en = PTP_TCR_TSEVNTENA; ++ if (priv->synopsys_id != DWMAC_CORE_5_10) ++ ts_event_en = PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 4bb8552a00d3..4a2c7355be63 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ +diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c +index 60acdfd1cb8c..856a10c293f8 100644 +--- a/drivers/nfc/st21nfca/dep.c ++++ b/drivers/nfc/st21nfca/dep.c +@@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, + memcpy(atr_res->gbi, atr_req->gbi, gb_len); + r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, + gb_len); +- if (r < 0) ++ if (r < 0) { ++ kfree_skb(skb); + return r; ++ } + } + + info->dep_info.curr_nfc_dep_pni = 0; +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c +index d057f1bfb2e9..8a91717600be 100644 +--- a/drivers/nvmem/qfprom.c ++++ b/drivers/nvmem/qfprom.c +@@ -27,25 +27,11 @@ static int 
qfprom_reg_read(void *context, + return 0; + } + +-static int qfprom_reg_write(void *context, +- unsigned int reg, void *_val, size_t bytes) +-{ +- struct qfprom_priv *priv = context; +- u8 *val = _val; +- int i = 0, words = bytes; +- +- while (words--) +- writeb(*val++, priv->base + reg + i++); +- +- return 0; +-} +- + static struct nvmem_config econfig = { + .name = "qfprom", + .stride = 1, + .word_size = 1, + .reg_read = qfprom_reg_read, +- .reg_write = qfprom_reg_write, + }; + + static int qfprom_probe(struct platform_device *pdev) +diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h +index be731f1a2209..91b65731fcaa 100644 +--- a/drivers/staging/rtl8712/wifi.h ++++ b/drivers/staging/rtl8712/wifi.h +@@ -440,7 +440,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe) + /* block-ack parameters */ + #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 + #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 ++#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 + #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 + #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +@@ -532,13 +532,6 @@ struct ieee80211_ht_addt_info { + #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 + #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 + +-/* block-ack parameters */ +-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 +- + /* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index 436cc51c92c3..cdcc64ea2554 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -371,15 +371,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + * tty fields and return the kref reference. + */ + if (rc) { +- tty_port_tty_set(&hp->port, NULL); +- tty->driver_data = NULL; +- tty_port_put(&hp->port); + printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); +- } else ++ } else { + /* We are ready... raise DTR/RTS */ + if (C_BAUD(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 1); ++ tty_port_set_initialized(&hp->port, true); ++ } + + /* Force wakeup of the polling thread */ + hvc_kick(); +@@ -389,22 +388,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + + static void hvc_close(struct tty_struct *tty, struct file * filp) + { +- struct hvc_struct *hp; ++ struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + + if (tty_hung_up_p(filp)) + return; + +- /* +- * No driver_data means that this close was issued after a failed +- * hvc_open by the tty layer's release_dev() function and we can just +- * exit cleanly because the kref reference wasn't made. +- */ +- if (!tty->driver_data) +- return; +- +- hp = tty->driver_data; +- + spin_lock_irqsave(&hp->port.lock, flags); + + if (--hp->port.count == 0) { +@@ -412,6 +401,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + /* We are done with the tty pointer now. */ + tty_port_tty_set(&hp->port, NULL); + ++ if (!tty_port_initialized(&hp->port)) ++ return; ++ + if (C_HUPCL(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 0); +@@ -428,6 +420,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + * waking periodically to check chars_in_buffer(). 
+ */ + tty_wait_until_sent(tty, HVC_CLOSE_WAIT); ++ tty_port_set_initialized(&hp->port, false); + } else { + if (hp->port.count < 0) + printk(KERN_ERR "hvc_close %X: oops, count is %d\n", +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index 15d33fa0c925..568b2171f335 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -127,7 +127,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ + static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ + static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ + static bool dead_key_next; +-static int npadch = -1; /* -1 or number assembled on pad */ ++ ++/* Handles a number being assembled on the number pad */ ++static bool npadch_active; ++static unsigned int npadch_value; ++ + static unsigned int diacr; + static char rep; /* flag telling character repeat */ + +@@ -845,12 +849,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) + shift_state &= ~(1 << value); + + /* kludge */ +- if (up_flag && shift_state != old_state && npadch != -1) { ++ if (up_flag && shift_state != old_state && npadch_active) { + if (kbd->kbdmode == VC_UNICODE) +- to_utf8(vc, npadch); ++ to_utf8(vc, npadch_value); + else +- put_queue(vc, npadch & 0xff); +- npadch = -1; ++ put_queue(vc, npadch_value & 0xff); ++ npadch_active = false; + } + } + +@@ -868,7 +872,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) + + static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) + { +- int base; ++ unsigned int base; + + if (up_flag) + return; +@@ -882,10 +886,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) + base = 16; + } + +- if (npadch == -1) +- npadch = value; +- else +- npadch = npadch * base + value; ++ if (!npadch_active) { ++ npadch_value = 0; ++ npadch_active = true; ++ } ++ ++ npadch_value = npadch_value * base + value; + } + + static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 8ca72d80501d..f67088bb8218 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -584,7 +584,7 @@ static void acm_softint(struct work_struct *work) + } + + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { +- for (i = 0; i < ACM_NR; i++) ++ for (i = 0; i < acm->rx_buflimit; i++) + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) + acm_submit_read_urb(acm, i, GFP_NOIO); + } +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c +index bf083c1f997f..0fbf9adef34b 100644 +--- a/drivers/usb/musb/musb_core.c ++++ b/drivers/usb/musb/musb_core.c +@@ -2721,6 +2721,13 @@ static int musb_resume(struct device *dev) + musb_enable_interrupts(musb); + musb_platform_enable(musb); + ++ /* session might be disabled in suspend */ ++ if (musb->port_mode == MUSB_HOST && ++ !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) { ++ devctl |= MUSB_DEVCTL_SESSION; ++ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); ++ } ++ + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) +diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c +index f42858e2b54c..0c6204add616 100644 +--- a/drivers/usb/musb/musb_debugfs.c ++++ b/drivers/usb/musb/musb_debugfs.c +@@ -168,6 +168,11 @@ static ssize_t musb_test_mode_write(struct file *file, + u8 test; + char buf[24]; + ++ memset(buf, 0x00, sizeof(buf)); ++ ++ if 
(copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) ++ return -EFAULT; ++ + pm_runtime_get_sync(musb->controller); + test = musb_readb(musb->mregs, MUSB_TESTMODE); + if (test) { +@@ -176,11 +181,6 @@ static ssize_t musb_test_mode_write(struct file *file, + goto ret; + } + +- memset(buf, 0x00, sizeof(buf)); +- +- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) +- return -EFAULT; +- + if (strstarts(buf, "force host full-speed")) + test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS; + +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index 955ab97b9b22..dcdd541b3291 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -93,6 +93,7 @@ struct ch341_private { + u8 mcr; + u8 msr; + u8 lcr; ++ unsigned long quirks; + }; + + static void ch341_set_termios(struct tty_struct *tty, +@@ -245,6 +246,53 @@ out: kfree(buffer); + return r; + } + ++static int ch341_detect_quirks(struct usb_serial_port *port) ++{ ++ struct ch341_private *priv = usb_get_serial_port_data(port); ++ struct usb_device *udev = port->serial->dev; ++ const unsigned int size = 2; ++ unsigned long quirks = 0; ++ char *buffer; ++ int r; ++ ++ buffer = kmalloc(size, GFP_KERNEL); ++ if (!buffer) ++ return -ENOMEM; ++ ++ /* ++ * A subset of CH34x devices does not support all features. The ++ * prescaler is limited and there is no support for sending a RS232 ++ * break condition. A read failure when trying to set up the latter is ++ * used to detect these devices. ++ */ ++ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), CH341_REQ_READ_REG, ++ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, ++ CH341_REG_BREAK, 0, buffer, size, DEFAULT_TIMEOUT); ++ if (r == -EPIPE) { ++ dev_dbg(&port->dev, "break control not supported\n"); ++ r = 0; ++ goto out; ++ } ++ ++ if (r != size) { ++ if (r >= 0) ++ r = -EIO; ++ dev_err(&port->dev, "failed to read break control: %d\n", r); ++ goto out; ++ } ++ ++ r = 0; ++out: ++ kfree(buffer); ++ ++ if (quirks) { ++ dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks); ++ priv->quirks |= quirks; ++ } ++ ++ return r; ++} ++ + static int ch341_port_probe(struct usb_serial_port *port) + { + struct ch341_private *priv; +@@ -267,6 +315,11 @@ static int ch341_port_probe(struct usb_serial_port *port) + goto error; + + usb_set_serial_port_data(port, priv); ++ ++ r = ch341_detect_quirks(port); ++ if (r < 0) ++ goto error; ++ + return 0; + + error: kfree(priv); +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 8bfffca3e4ae..254a8bbeea67 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1157,6 +1157,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */ ++ .driver_info = NCTRL(0) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ ++ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index ce0401d3137f..d147feae83e6 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -173,6 +173,7 @@ 
static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ ++ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ +diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c +index 13be21aad2f4..4b9845807bee 100644 +--- a/drivers/usb/serial/usb_wwan.c ++++ b/drivers/usb/serial/usb_wwan.c +@@ -270,6 +270,10 @@ static void usb_wwan_indat_callback(struct urb *urb) + if (status) { + dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", + __func__, status, endpoint); ++ ++ /* don't resubmit on fatal errors */ ++ if (status == -ESHUTDOWN || status == -ENOENT) ++ return; + } else { + if (urb->actual_length) { + tty_insert_flip_string(&port->port, data, +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index e3596db077dc..953d7ca01eb6 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -657,6 +657,10 @@ struct mips_cdmm_device_id { + /* + * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. + * Although gcc seems to ignore this error, clang fails without this define. ++ * ++ * Note: The ordering of the struct is different from upstream because the ++ * static initializers in kernels < 5.7 still use C89 style while upstream ++ * has been converted to proper C99 initializers. + */ + #define x86cpu_device_id x86_cpu_id + struct x86_cpu_id { +@@ -665,6 +669,7 @@ struct x86_cpu_id { + __u16 model; + __u16 feature; /* bit index */ + kernel_ulong_t driver_data; ++ __u16 steppings; + }; + + #define X86_FEATURE_MATCH(x) \ +@@ -673,6 +678,7 @@ struct x86_cpu_id { + #define X86_VENDOR_ANY 0xffff + #define X86_FAMILY_ANY 0 + #define X86_MODEL_ANY 0 ++#define X86_STEPPING_ANY 0 + #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ + + /* +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index 6f6ade63b04c..e8a924eeea3d 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -31,6 +31,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + { + unsigned int gso_type = 0; + unsigned int thlen = 0; ++ unsigned int p_off = 0; + unsigned int ip_proto; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { +@@ -68,7 +69,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + if (!skb_partial_csum_set(skb, start, off)) + return -EINVAL; + +- if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) ++ p_off = skb_transport_offset(skb) + thlen; ++ if (p_off > skb_headlen(skb)) + return -EINVAL; + } else { + /* gso packets without NEEDS_CSUM do not set transport_offset. 
+@@ -92,23 +94,32 @@ retry: + return -EINVAL; + } + +- if (keys.control.thoff + thlen > skb_headlen(skb) || ++ p_off = keys.control.thoff + thlen; ++ if (p_off > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); ++ } else if (gso_type) { ++ p_off = thlen; ++ if (p_off > skb_headlen(skb)) ++ return -EINVAL; + } + } + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); ++ struct skb_shared_info *shinfo = skb_shinfo(skb); + +- skb_shinfo(skb)->gso_size = gso_size; +- skb_shinfo(skb)->gso_type = gso_type; ++ /* Too small packets are not really GSO ones. */ ++ if (skb->len - p_off > gso_size) { ++ shinfo->gso_size = gso_size; ++ shinfo->gso_type = gso_type; + +- /* Header must be checked, and gso_segs computed. */ +- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; +- skb_shinfo(skb)->gso_segs = 0; ++ /* Header must be checked, and gso_segs computed. */ ++ shinfo->gso_type |= SKB_GSO_DODGY; ++ shinfo->gso_segs = 0; ++ } + } + + return 0; +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index c74761004ee5..291680ba8504 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -867,10 +867,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, + if (ret) + goto out; + +- /* uprobe_write_opcode() assumes we don't cross page boundary */ +- BUG_ON((uprobe->offset & ~PAGE_MASK) + +- UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); +- + smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ + set_bit(UPROBE_COPY_INSN, &uprobe->flags); + +@@ -1166,6 +1162,15 @@ static int __uprobe_register(struct inode *inode, loff_t offset, + if (offset > i_size_read(inode)) + return -EINVAL; + ++ /* ++ * This ensures that copy_from_page(), copy_to_page() and ++ * __update_ref_ctr() can't cross page boundary. 
++ */ ++ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) ++ return -EINVAL; ++ if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) ++ return -EINVAL; ++ + retry: + uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); + if (!uprobe) +@@ -2014,6 +2019,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) + uprobe_opcode_t opcode; + int result; + ++ if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) ++ return -EINVAL; ++ + pagefault_disable(); + result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); + pagefault_enable(); +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index 458dc6eb5a68..a27d034c85cc 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -276,6 +276,7 @@ static struct in_device *inetdev_init(struct net_device *dev) + err = devinet_sysctl_register(in_dev); + if (err) { + in_dev->dead = 1; ++ neigh_parms_release(&arp_tbl, in_dev->arp_parms); + in_dev_put(in_dev); + in_dev = NULL; + goto out; +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 425b95eb7e87..6dc3bfa12b1e 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -1460,6 +1460,9 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net, + if (sk->sk_type != SOCK_DGRAM) + return -EPROTONOSUPPORT; + ++ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) ++ return -EPROTONOSUPPORT; ++ + if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || + (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) + return -EPROTONOSUPPORT; +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c +index 0d7c887a2b75..955662a6dee7 100644 +--- a/net/l2tp/l2tp_ip.c ++++ b/net/l2tp/l2tp_ip.c +@@ -20,7 +20,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -209,15 +208,31 @@ discard: + return 0; + } + +-static int l2tp_ip_open(struct sock *sk) ++static int l2tp_ip_hash(struct sock *sk) + { +- /* Prevent autobind. We don't have ports. */ +- inet_sk(sk)->inet_num = IPPROTO_L2TP; ++ if (sk_unhashed(sk)) { ++ write_lock_bh(&l2tp_ip_lock); ++ sk_add_node(sk, &l2tp_ip_table); ++ write_unlock_bh(&l2tp_ip_lock); ++ } ++ return 0; ++} + ++static void l2tp_ip_unhash(struct sock *sk) ++{ ++ if (sk_unhashed(sk)) ++ return; + write_lock_bh(&l2tp_ip_lock); +- sk_add_node(sk, &l2tp_ip_table); ++ sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip_lock); ++} ++ ++static int l2tp_ip_open(struct sock *sk) ++{ ++ /* Prevent autobind. We don't have ports. */ ++ inet_sk(sk)->inet_num = IPPROTO_L2TP; + ++ l2tp_ip_hash(sk); + return 0; + } + +@@ -594,8 +609,8 @@ static struct proto l2tp_ip_prot = { + .sendmsg = l2tp_ip_sendmsg, + .recvmsg = l2tp_ip_recvmsg, + .backlog_rcv = l2tp_ip_backlog_recv, +- .hash = inet_hash, +- .unhash = inet_unhash, ++ .hash = l2tp_ip_hash, ++ .unhash = l2tp_ip_unhash, + .obj_size = sizeof(struct l2tp_ip_sock), + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ip_setsockopt, +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c +index d148766f40d1..0fa694bd3f6a 100644 +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -20,8 +20,6 @@ + #include + #include + #include +-#include +-#include + #include + #include + #include +@@ -222,15 +220,31 @@ discard: + return 0; + } + +-static int l2tp_ip6_open(struct sock *sk) ++static int l2tp_ip6_hash(struct sock *sk) + { +- /* Prevent autobind. We don't have ports. 
*/ +- inet_sk(sk)->inet_num = IPPROTO_L2TP; ++ if (sk_unhashed(sk)) { ++ write_lock_bh(&l2tp_ip6_lock); ++ sk_add_node(sk, &l2tp_ip6_table); ++ write_unlock_bh(&l2tp_ip6_lock); ++ } ++ return 0; ++} + ++static void l2tp_ip6_unhash(struct sock *sk) ++{ ++ if (sk_unhashed(sk)) ++ return; + write_lock_bh(&l2tp_ip6_lock); +- sk_add_node(sk, &l2tp_ip6_table); ++ sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); ++} ++ ++static int l2tp_ip6_open(struct sock *sk) ++{ ++ /* Prevent autobind. We don't have ports. */ ++ inet_sk(sk)->inet_num = IPPROTO_L2TP; + ++ l2tp_ip6_hash(sk); + return 0; + } + +@@ -728,8 +742,8 @@ static struct proto l2tp_ip6_prot = { + .sendmsg = l2tp_ip6_sendmsg, + .recvmsg = l2tp_ip6_recvmsg, + .backlog_rcv = l2tp_ip6_backlog_recv, +- .hash = inet6_hash, +- .unhash = inet_unhash, ++ .hash = l2tp_ip6_hash, ++ .unhash = l2tp_ip6_unhash, + .obj_size = sizeof(struct l2tp_ip6_sock), + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index 582a3e4dfce2..7bd6c8199ca6 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -1275,7 +1275,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags, + /* Wait for children sockets to appear; these are the new sockets + * created upon connection establishment. + */ +- timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); ++ timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); + prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); + + while ((connected = vsock_dequeue_accept(listener)) == NULL && diff --git a/patch/kernel/odroidxu4-current/patch-5.4.46-47.patch b/patch/kernel/odroidxu4-current/patch-5.4.46-47.patch new file mode 100644 index 000000000..e1f2fb313 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.46-47.patch @@ -0,0 +1,4758 @@ +diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt +index ca983328976b..f65b51523014 100644 +--- a/Documentation/lzo.txt ++++ b/Documentation/lzo.txt +@@ -159,11 +159,15 @@ Byte sequences + distance = 16384 + (H << 14) + D + state = S (copy S literals after this block) + End of stream is reached if distance == 16384 ++ In version 1 only, to prevent ambiguity with the RLE case when ++ ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the ++ compressor must not emit block copies where distance and length ++ meet these conditions. + + In version 1 only, this instruction is also used to encode a run of +- zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. ++ zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. + In this case, it is followed by a fourth byte, X. +- run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4. 
++ run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 + + 0 0 1 L L L L L (32..63) + Copy of small block within 16kB distance (preferably less than 34B) +diff --git a/Makefile b/Makefile +index 4835d6734c3f..1da2944b842e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 46 ++SUBLEVEL = 47 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +index ba7f3e646c26..1333a68b9373 100644 +--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts ++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +@@ -125,8 +125,6 @@ + bus-width = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sdmmc0_default>; +- non-removable; +- mmc-ddr-1_8v; + status = "okay"; + }; + +diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h +index 8e995ec796c8..cbde9fa15792 100644 +--- a/arch/arm/include/asm/kvm_emulate.h ++++ b/arch/arm/include/asm/kvm_emulate.h +@@ -363,6 +363,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, + } + } + +-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} ++static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; } ++static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { } + + #endif /* __ARM_KVM_EMULATE_H__ */ +diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h +index 8a37c8e89777..1b179b1f46bc 100644 +--- a/arch/arm/include/asm/kvm_host.h ++++ b/arch/arm/include/asm/kvm_host.h +@@ -421,4 +421,6 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) + return true; + } + ++#define kvm_arm_vcpu_loaded(vcpu) (false) ++ + #endif /* __ARM_KVM_HOST_H__ */ +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 324352787aea..db9401581cd2 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = { + }; + + static struct undef_hook thumb_break_hook = { +- .instr_mask = 0xffff, +- .instr_val = 0xde01, ++ .instr_mask = 0xffffffff, ++ .instr_val = 0x0000de01, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = break_trap, +diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h +index b263e239cb59..a45366c3909b 100644 +--- a/arch/arm64/include/asm/acpi.h ++++ b/arch/arm64/include/asm/acpi.h +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -31,14 +32,14 @@ + * is therefore used to delimit the MADT GICC structure minimum length + * appropriately. 
+ */ +-#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \ ++#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \ + struct acpi_madt_generic_interrupt, efficiency_class) + + #define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \ + (unsigned long)(entry) + (entry)->header.length > (end)) + +-#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \ ++#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \ + spe_interrupt) + sizeof(u16)) + + /* Basic configuration for ACPI */ +diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h +index 6ff84f1f3b4c..f47081b40523 100644 +--- a/arch/arm64/include/asm/kvm_emulate.h ++++ b/arch/arm64/include/asm/kvm_emulate.h +@@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) + vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); + } + +-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) +-{ +- if (vcpu_has_ptrauth(vcpu)) +- vcpu_ptrauth_disable(vcpu); +-} +- + static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) + { + return vcpu->arch.vsesr_el2; +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index f656169db8c3..0c3bd6aff6e9 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -392,8 +392,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); + * CP14 and CP15 live in the same array, as they are backed by the + * same system registers. + */ +-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) +-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) ++#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ++ ++#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) ++#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) + + struct kvm_vm_stat { + ulong remote_tlb_flush; +@@ -677,4 +679,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); + #define kvm_arm_vcpu_sve_finalized(vcpu) \ + ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) + ++#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu) ++ + #endif /* __ARM64_KVM_HOST_H__ */ +diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c +index 706cca23f0d2..1249f68a9418 100644 +--- a/arch/arm64/kvm/handle_exit.c ++++ b/arch/arm64/kvm/handle_exit.c +@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) + return 1; + } + +-#define __ptrauth_save_key(regs, key) \ +-({ \ +- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ +- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +-}) +- + /* + * Handle the guest trying to use a ptrauth instruction, or trying to access a + * ptrauth register. 
+ */ + void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) + { +- struct kvm_cpu_context *ctxt; +- +- if (vcpu_has_ptrauth(vcpu)) { ++ if (vcpu_has_ptrauth(vcpu)) + vcpu_ptrauth_enable(vcpu); +- ctxt = vcpu->arch.host_cpu_context; +- __ptrauth_save_key(ctxt->sys_regs, APIA); +- __ptrauth_save_key(ctxt->sys_regs, APIB); +- __ptrauth_save_key(ctxt->sys_regs, APDA); +- __ptrauth_save_key(ctxt->sys_regs, APDB); +- __ptrauth_save_key(ctxt->sys_regs, APGA); +- } else { ++ else + kvm_inject_undefined(vcpu); +- } + } + + /* +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index 01a515e0171e..d43f44b3377e 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -1280,10 +1280,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) + { ++ int reg = r->reg; ++ ++ /* See the 32bit mapping in kvm_host.h */ ++ if (p->is_aarch32) ++ reg = r->reg / 2; ++ + if (p->is_write) +- vcpu_write_sys_reg(vcpu, p->regval, r->reg); ++ vcpu_write_sys_reg(vcpu, p->regval, reg); + else +- p->regval = vcpu_read_sys_reg(vcpu, r->reg); ++ p->regval = vcpu_read_sys_reg(vcpu, reg); + return true; + } + +diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h +index 9023828ede97..ac8f65a3e75a 100644 +--- a/arch/csky/abiv2/inc/abi/entry.h ++++ b/arch/csky/abiv2/inc/abi/entry.h +@@ -13,6 +13,8 @@ + #define LSAVE_A1 28 + #define LSAVE_A2 32 + #define LSAVE_A3 36 ++#define LSAVE_A4 40 ++#define LSAVE_A5 44 + + #define KSPTOUSP + #define USPTOKSP +diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S +index 65c55f22532a..4349528fbf38 100644 +--- a/arch/csky/kernel/entry.S ++++ b/arch/csky/kernel/entry.S +@@ -170,8 +170,10 @@ csky_syscall_trace: + ldw a3, (sp, LSAVE_A3) + #if defined(__CSKYABIV2__) + subi sp, 8 +- stw r5, (sp, 0x4) +- stw r4, (sp, 0x0) ++ ldw r9, (sp, LSAVE_A4) ++ stw r9, (sp, 0x0) ++ ldw r9, (sp, LSAVE_A5) ++ stw r9, (sp, 0x4) + #else + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h +index 41204a49cf95..7b47a323dc23 100644 +--- a/arch/mips/include/asm/kvm_host.h ++++ b/arch/mips/include/asm/kvm_host.h +@@ -274,8 +274,12 @@ enum emulation_result { + #define MIPS3_PG_SHIFT 6 + #define MIPS3_PG_FRAME 0x3fffffc0 + ++#if defined(CONFIG_64BIT) ++#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) ++#else + #define VPN2_MASK 0xffffe000 +-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID ++#endif ++#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) + #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) + #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) + #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S +index 4638d2863388..060a1acd7c6d 100644 +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -326,12 +326,6 @@ SECTIONS + *(.branch_lt) + } + +-#ifdef CONFIG_DEBUG_INFO_BTF +- .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { +- *(.BTF) +- } +-#endif +- + .opd : AT(ADDR(.opd) - LOAD_OFFSET) { + __start_opd = .; + KEEP(*(.opd)) +diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c +index c73205172447..633711bf1cae 100644 +--- a/arch/powerpc/mm/ptdump/ptdump.c ++++ b/arch/powerpc/mm/ptdump/ptdump.c +@@ -58,6 +58,7 @@ struct pg_state { + unsigned long start_address; + unsigned long start_pa; + unsigned 
long last_pa; ++ unsigned long page_size; + unsigned int level; + u64 current_flags; + bool check_wx; +@@ -155,9 +156,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr) + #endif + + pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); +- if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) { ++ if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) { + pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa); +- delta = PAGE_SIZE >> 10; ++ delta = st->page_size >> 10; + } else { + pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); + delta = (addr - st->start_address) >> 10; +@@ -188,7 +189,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) + } + + static void note_page(struct pg_state *st, unsigned long addr, +- unsigned int level, u64 val) ++ unsigned int level, u64 val, unsigned long page_size) + { + u64 flag = val & pg_level[level].mask; + u64 pa = val & PTE_RPN_MASK; +@@ -200,6 +201,7 @@ static void note_page(struct pg_state *st, unsigned long addr, + st->start_address = addr; + st->start_pa = pa; + st->last_pa = pa; ++ st->page_size = page_size; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + /* + * Dump the section of virtual memory when: +@@ -211,7 +213,7 @@ static void note_page(struct pg_state *st, unsigned long addr, + */ + } else if (flag != st->current_flags || level != st->level || + addr >= st->marker[1].start_address || +- (pa != st->last_pa + PAGE_SIZE && ++ (pa != st->last_pa + st->page_size && + (pa != st->start_pa || st->start_pa != st->last_pa))) { + + /* Check the PTE flags */ +@@ -239,6 +241,7 @@ static void note_page(struct pg_state *st, unsigned long addr, + st->start_address = addr; + st->start_pa = pa; + st->last_pa = pa; ++ st->page_size = page_size; + st->current_flags = flag; + st->level = level; + } else { +@@ -254,7 +257,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) + + for (i = 0; i < PTRS_PER_PTE; i++, pte++) { + addr = start + i * PAGE_SIZE; +- note_page(st, addr, 4, pte_val(*pte)); ++ note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE); + + } + } +@@ -271,7 +274,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) + /* pmd exists */ + walk_pte(st, pmd, addr); + else +- note_page(st, addr, 3, pmd_val(*pmd)); ++ note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE); + } + } + +@@ -287,7 +290,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) + /* pud exists */ + walk_pmd(st, pud, addr); + else +- note_page(st, addr, 2, pud_val(*pud)); ++ note_page(st, addr, 2, pud_val(*pud), PUD_SIZE); + } + } + +@@ -306,7 +309,7 @@ static void walk_pagetables(struct pg_state *st) + /* pgd exists */ + walk_pud(st, pgd, addr); + else +- note_page(st, addr, 1, pgd_val(*pgd)); ++ note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); + } + } + +@@ -361,7 +364,7 @@ static int ptdump_show(struct seq_file *m, void *v) + + /* Traverse kernel page tables */ + walk_pagetables(&st); +- note_page(&st, 0, 0, 0); ++ note_page(&st, 0, 0, 0, 0); + return 0; + } + +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c +index fe8d396e2301..16df9cc8f360 100644 +--- a/arch/powerpc/sysdev/xive/common.c ++++ b/arch/powerpc/sysdev/xive/common.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1013,12 +1014,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); + void xive_cleanup_irq_data(struct xive_irq_data *xd) + { + if (xd->eoi_mmio) { 
++ unmap_kernel_range((unsigned long)xd->eoi_mmio, ++ 1u << xd->esb_shift); + iounmap(xd->eoi_mmio); + if (xd->eoi_mmio == xd->trig_mmio) + xd->trig_mmio = NULL; + xd->eoi_mmio = NULL; + } + if (xd->trig_mmio) { ++ unmap_kernel_range((unsigned long)xd->trig_mmio, ++ 1u << xd->esb_shift); + iounmap(xd->trig_mmio); + xd->trig_mmio = NULL; + } +diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c +index 281e0dd4c614..20e093f86329 100644 +--- a/arch/s390/pci/pci_clp.c ++++ b/arch/s390/pci/pci_clp.c +@@ -309,14 +309,13 @@ out: + + int clp_disable_fh(struct zpci_dev *zdev) + { +- u32 fh = zdev->fh; + int rc; + + if (!zdev_enabled(zdev)) + return 0; + + rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN); +- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); ++ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); + return rc; + } + +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index c531e3f3269e..0461ab257df6 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs + + static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ +- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), +- INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1), ++ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), ++ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), + EVENT_EXTRA_END + }; + +diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h +index 2ee8e469dcf5..162128cdfbf2 100644 +--- a/arch/x86/include/asm/set_memory.h ++++ b/arch/x86/include/asm/set_memory.h +@@ -85,28 +85,35 @@ void set_kernel_text_rw(void); + void set_kernel_text_ro(void); + + #ifdef CONFIG_X86_64 +-static inline int set_mce_nospec(unsigned long pfn) ++/* ++ * Prevent speculative access to the page by either unmapping ++ * it (if we do not require access to any part of the page) or ++ * marking it uncacheable (if we want to try to retrieve data ++ * from non-poisoned lines in the page). ++ */ ++static inline int set_mce_nospec(unsigned long pfn, bool unmap) + { + unsigned long decoy_addr; + int rc; + + /* +- * Mark the linear address as UC to make sure we don't log more +- * errors because of speculative access to the page. + * We would like to just call: +- * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1); ++ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); + * but doing that would radically increase the odds of a + * speculative access to the poison page because we'd have + * the virtual address of the kernel 1:1 mapping sitting + * around in registers. + * Instead we get tricky. We create a non-canonical address + * that looks just like the one we want, but has bit 63 flipped. +- * This relies on set_memory_uc() properly sanitizing any __pa() ++ * This relies on set_memory_XX() properly sanitizing any __pa() + * results with __PHYSICAL_MASK or PTE_PFN_MASK. 
+ */ + decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); + +- rc = set_memory_uc(decoy_addr, 1); ++ if (unmap) ++ rc = set_memory_np(decoy_addr, 1); ++ else ++ rc = set_memory_uc(decoy_addr, 1); + if (rc) + pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); + return rc; +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index c3f4dd4ae155..c553cafd0736 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1117,8 +1117,7 @@ static const int amd_erratum_383[] = + + /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ + static const int amd_erratum_1054[] = +- AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); +- ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 3c3f3e02683a..acbf3dbb8bf2 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = ++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = ++ SPECTRE_V2_USER_NONE; ++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = + SPECTRE_V2_USER_NONE; + + #ifdef CONFIG_RETPOLINE +@@ -727,15 +729,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + break; + } + +- /* +- * At this point, an STIBP mode other than "off" has been set. +- * If STIBP support is not being forced, check if STIBP always-on +- * is preferred. +- */ +- if (mode != SPECTRE_V2_USER_STRICT && +- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) +- mode = SPECTRE_V2_USER_STRICT_PREFERRED; +- + /* Initialize Indirect Branch Prediction Barrier */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); +@@ -758,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? + "always-on" : "conditional"); ++ ++ spectre_v2_user_ibpb = mode; + } + +- /* If enhanced IBRS is enabled no STIBP required */ +- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) ++ /* ++ * If enhanced IBRS is enabled or SMT impossible, STIBP is not ++ * required. ++ */ ++ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* +- * If SMT is not possible or STIBP is not available clear the STIBP +- * mode. ++ * At this point, an STIBP mode other than "off" has been set. ++ * If STIBP support is not being forced, check if STIBP always-on ++ * is preferred. + */ +- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) ++ if (mode != SPECTRE_V2_USER_STRICT && ++ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) ++ mode = SPECTRE_V2_USER_STRICT_PREFERRED; ++ ++ /* ++ * If STIBP is not available, clear the STIBP mode. 
++ */ ++ if (!boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; ++ ++ spectre_v2_user_stibp = mode; ++ + set_mode: +- spectre_v2_user = mode; +- /* Only print the STIBP mode when SMT possible */ +- if (smt_possible) +- pr_info("%s\n", spectre_v2_user_strings[mode]); ++ pr_info("%s\n", spectre_v2_user_strings[mode]); + } + + static const char * const spectre_v2_strings[] = { +@@ -1007,7 +1013,7 @@ void cpu_bugs_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: +@@ -1250,14 +1256,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + { + switch (ctrl) { + case PR_SPEC_ENABLE: +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict +- * mode. ++ * mode. It can neither be enabled if it was force-disabled ++ * by a previous prctl call. ++ + */ +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT || +- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || ++ task_spec_ib_force_disable(task)) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); +@@ -1268,10 +1279,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + * Indirect branch speculation is always allowed when + * mitigation is force disabled. + */ +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return -EPERM; +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT || +- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) +@@ -1302,7 +1315,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) + { + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); + } + #endif +@@ -1333,22 +1347,24 @@ static int ib_prctl_get(struct task_struct *task) + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + +- switch (spectre_v2_user) { +- case SPECTRE_V2_USER_NONE: ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_PRCTL: +- case SPECTRE_V2_USER_SECCOMP: ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) ++ return PR_SPEC_DISABLE; ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if 
(task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_STRICT: +- case SPECTRE_V2_USER_STRICT_PREFERRED: +- return PR_SPEC_DISABLE; +- default: ++ } else + return PR_SPEC_NOT_AFFECTED; +- } + } + + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +@@ -1587,7 +1603,7 @@ static char *stibp_state(void) + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c +index aecb15ba66cd..fd76e3733dd3 100644 +--- a/arch/x86/kernel/cpu/mce/core.c ++++ b/arch/x86/kernel/cpu/mce/core.c +@@ -533,6 +533,13 @@ bool mce_is_memory_error(struct mce *m) + } + EXPORT_SYMBOL_GPL(mce_is_memory_error); + ++static bool whole_page(struct mce *m) ++{ ++ if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV)) ++ return true; ++ return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT; ++} ++ + bool mce_is_correctable(struct mce *m) + { + if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) +@@ -601,7 +608,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, + if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { + pfn = mce->addr >> PAGE_SHIFT; + if (!memory_failure(pfn, 0)) +- set_mce_nospec(pfn); ++ set_mce_nospec(pfn, whole_page(mce)); + } + + return NOTIFY_OK; +@@ -1103,7 +1110,7 @@ static int do_memory_failure(struct mce *m) + if (ret) + pr_err("Memory error not recovered"); + else +- set_mce_nospec(m->addr >> PAGE_SHIFT); ++ set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m)); + return ret; + } + +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 5e94c4354d4e..571e38c9ee1d 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -428,28 +428,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, + + lockdep_assert_irqs_disabled(); + +- /* +- * If TIF_SSBD is different, select the proper mitigation +- * method. Note that if SSBD mitigation is disabled or permanentely +- * enabled this branch can't be taken because nothing can set +- * TIF_SSBD. +- */ +- if (tif_diff & _TIF_SSBD) { +- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ /* Handle change of TIF_SSBD depending on the mitigation method. */ ++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_ssb_virt_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_core_ssb_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) { +- msr |= ssbd_tif_to_spec_ctrl(tifn); +- updmsr = true; +- } ++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || ++ static_cpu_has(X86_FEATURE_AMD_SSBD)) { ++ updmsr |= !!(tif_diff & _TIF_SSBD); ++ msr |= ssbd_tif_to_spec_ctrl(tifn); + } + +- /* +- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, +- * otherwise avoid the MSR write. +- */ ++ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. 
*/ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index 0cc7c0b106bb..762f5c1465a6 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), + }, + }, ++ { /* Handle problems with rebooting on Apple MacBook6,1 */ ++ .callback = set_pci_reboot, ++ .ident = "Apple MacBook6,1", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), ++ }, ++ }, + { /* Handle problems with rebooting on Apple MacBookPro5 */ + .callback = set_pci_reboot, + .ident = "Apple MacBookPro5", +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index d8673d8a779b..36a585b80d9e 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -25,10 +25,6 @@ + #include + #include + +-#ifdef CONFIG_X86_64 +-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; +-#endif +- + unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index e2feacf921a0..bac1a65a9d39 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) + #ifdef CONFIG_X86_32 + OUTPUT_ARCH(i386) + ENTRY(phys_startup_32) +-jiffies = jiffies_64; + #else + OUTPUT_ARCH(i386:x86-64) + ENTRY(phys_startup_64) +-jiffies_64 = jiffies; + #endif + ++jiffies = jiffies_64; ++ + #if defined(CONFIG_X86_64) + /* + * On 64-bit, align RODATA to 2MB so we retain large page mappings for +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 518100ea5ef4..a3824ae9a634 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -343,6 +343,8 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) + { + BUG_ON((u64)(unsigned)access_mask != access_mask); + BUG_ON((mmio_mask & mmio_value) != mmio_value); ++ WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); ++ WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); + shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; + shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; + shadow_mmio_access_mask = access_mask; +@@ -580,16 +582,15 @@ static void kvm_mmu_reset_all_pte_masks(void) + * the most significant bits of legal physical address space. 
+ */ + shadow_nonpresent_or_rsvd_mask = 0; +- low_phys_bits = boot_cpu_data.x86_cache_bits; +- if (boot_cpu_data.x86_cache_bits < +- 52 - shadow_nonpresent_or_rsvd_mask_len) { ++ low_phys_bits = boot_cpu_data.x86_phys_bits; ++ if (boot_cpu_has_bug(X86_BUG_L1TF) && ++ !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= ++ 52 - shadow_nonpresent_or_rsvd_mask_len)) { ++ low_phys_bits = boot_cpu_data.x86_cache_bits ++ - shadow_nonpresent_or_rsvd_mask_len; + shadow_nonpresent_or_rsvd_mask = +- rsvd_bits(boot_cpu_data.x86_cache_bits - +- shadow_nonpresent_or_rsvd_mask_len, +- boot_cpu_data.x86_cache_bits - 1); +- low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; +- } else +- WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF)); ++ rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); ++ } + + shadow_nonpresent_or_rsvd_lower_gfn_mask = + GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); +@@ -6247,25 +6248,16 @@ static void kvm_set_mmio_spte_mask(void) + u64 mask; + + /* +- * Set the reserved bits and the present bit of an paging-structure +- * entry to generate page fault with PFER.RSV = 1. +- */ +- +- /* +- * Mask the uppermost physical address bit, which would be reserved as +- * long as the supported physical address width is less than 52. ++ * Set a reserved PA bit in MMIO SPTEs to generate page faults with ++ * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT ++ * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports ++ * 52-bit physical addresses then there are no reserved PA bits in the ++ * PTEs and so the reserved PA approach must be disabled. + */ +- mask = 1ull << 51; +- +- /* Set the present bit. */ +- mask |= 1ull; +- +- /* +- * If reserved bit is not supported, clear the present bit to disable +- * mmio page fault. +- */ +- if (shadow_phys_bits == 52) +- mask &= ~1ull; ++ if (shadow_phys_bits < 52) ++ mask = BIT_ULL(51) | PT_PRESENT_MASK; ++ else ++ mask = 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); + } +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index cc7da664fd39..3243a80ea32c 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -3237,8 +3237,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) + return NESTED_EXIT_HOST; + break; + case SVM_EXIT_EXCP_BASE + PF_VECTOR: +- /* When we're shadowing, trap PFs, but not async PF */ +- if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) ++ /* Trap async PF even if not shadowing */ ++ if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) + return NESTED_EXIT_HOST; + break; + default: +@@ -3327,7 +3327,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr + dst->iopm_base_pa = from->iopm_base_pa; + dst->msrpm_base_pa = from->msrpm_base_pa; + dst->tsc_offset = from->tsc_offset; +- dst->asid = from->asid; ++ /* asid not copied, it is handled manually for svm->vmcb. 
*/ + dst->tlb_ctl = from->tlb_ctl; + dst->int_ctl = from->int_ctl; + dst->int_vector = from->int_vector; +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 4a09f40b24dc..a460ddf04d60 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -302,7 +302,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) + cpu = get_cpu(); + prev = vmx->loaded_vmcs; + vmx->loaded_vmcs = vmcs; +- vmx_vcpu_load_vmcs(vcpu, cpu); ++ vmx_vcpu_load_vmcs(vcpu, cpu, prev); + vmx_sync_vmcs_host_state(vmx, prev); + put_cpu(); + +@@ -5357,7 +5357,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + KVM_ISA_VMX); + +- switch (exit_reason) { ++ switch ((u16)exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (is_nmi(intr_info)) + return false; +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 7a2c05277f4c..5fac01865a2d 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -1286,10 +1286,12 @@ after_clear_sn: + pi_set_on(pi_desc); + } + +-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) ++void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, ++ struct loaded_vmcs *buddy) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + bool already_loaded = vmx->loaded_vmcs->cpu == cpu; ++ struct vmcs *prev; + + if (!already_loaded) { + loaded_vmcs_clear(vmx->loaded_vmcs); +@@ -1308,10 +1310,18 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) + local_irq_enable(); + } + +- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { ++ prev = per_cpu(current_vmcs, cpu); ++ if (prev != vmx->loaded_vmcs->vmcs) { + per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; + vmcs_load(vmx->loaded_vmcs->vmcs); +- indirect_branch_prediction_barrier(); ++ ++ /* ++ * No indirect branch prediction barrier needed when switching ++ * the active VMCS within a guest, e.g. on nested VM-Enter. ++ * The L1 VMM can protect itself with retpolines, IBPB or IBRS. 
++ */ ++ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) ++ indirect_branch_prediction_barrier(); + } + + if (!already_loaded) { +@@ -1356,7 +1366,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + +- vmx_vcpu_load_vmcs(vcpu, cpu); ++ vmx_vcpu_load_vmcs(vcpu, cpu, NULL); + + vmx_vcpu_pi_load(vcpu, cpu); + +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h +index 5a0f34b1e226..295c5f83842e 100644 +--- a/arch/x86/kvm/vmx/vmx.h ++++ b/arch/x86/kvm/vmx/vmx.h +@@ -304,7 +304,8 @@ struct kvm_vmx { + }; + + bool nested_vmx_allowed(struct kvm_vcpu *vcpu); +-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu); ++void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, ++ struct loaded_vmcs *buddy); + void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu); + int allocate_vpid(void); + void free_vpid(int vpid); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index c6d9e363dfc0..fff279fb173b 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6833,7 +6833,7 @@ restart: + if (!ctxt->have_exception || + exception_type(ctxt->exception.vector) == EXCPT_TRAP) { + kvm_rip_write(vcpu, ctxt->eip); +- if (r && ctxt->tf) ++ if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + r = kvm_vcpu_do_singlestep(vcpu); + __kvm_set_rflags(vcpu, ctxt->eflags); + } +@@ -7978,9 +7978,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) + kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); + } + +-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, +- unsigned long start, unsigned long end, +- bool blockable) ++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, ++ unsigned long start, unsigned long end) + { + unsigned long apic_address; + +@@ -7991,8 +7990,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); + if (start <= apic_address && apic_address < end) + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); +- +- return 0; + } + + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index e723559c386a..0c67a5a94de3 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); + + /* + * Device [1022:7808] +diff --git a/crypto/algapi.c b/crypto/algapi.c +index bb8329e49956..fff52bc9d97d 100644 +--- a/crypto/algapi.c ++++ b/crypto/algapi.c +@@ -374,7 +374,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) + err = wait_for_completion_killable(&larval->completion); + WARN_ON(err); + if (!err) +- crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); ++ crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); + + out: + crypto_larval_kill(&larval->alg); +diff --git a/crypto/drbg.c b/crypto/drbg.c +index b6929eb5f565..04379ca624cd 100644 +--- a/crypto/drbg.c ++++ b/crypto/drbg.c +@@ -1294,8 +1294,10 @@ static inline int drbg_alloc_state(struct 
drbg_state *drbg) + if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { + drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), + GFP_KERNEL); +- if (!drbg->prev) ++ if (!drbg->prev) { ++ ret = -ENOMEM; + goto fini; ++ } + drbg->fips_primed = false; + } + +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index a1a858ad4d18..f9b1a2abdbe2 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -865,6 +865,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + "acpi_cppc"); + if (ret) { + per_cpu(cpc_desc_ptr, pr->id) = NULL; ++ kobject_put(&cpc_ptr->kobj); + goto out_free; + } + +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index ea9ecf3d70c2..1a5956fb2cbc 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; +- /* If _PR3 is not available, use D3hot as the target state. */ ++ /* If D3cold is not supported, use D3hot as the target state. */ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { +diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c +index aba0d0027586..6d7a522952bf 100644 +--- a/drivers/acpi/evged.c ++++ b/drivers/acpi/evged.c +@@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + struct resource r; + struct acpi_resource_irq *p = &ares->data.irq; + struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; ++ char ev_name[5]; ++ u8 trigger; + + if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) + return AE_OK; +@@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + dev_err(dev, "unable to parse IRQ resource\n"); + return AE_ERROR; + } +- if (ares->type == ACPI_RESOURCE_TYPE_IRQ) ++ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { + gsi = p->interrupts[0]; +- else ++ trigger = p->triggering; ++ } else { + gsi = pext->interrupts[0]; ++ trigger = p->triggering; ++ } + + irq = r.start; + +- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { ++ switch (gsi) { ++ case 0 ... 255: ++ sprintf(ev_name, "_%c%02hhX", ++ trigger == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', gsi); ++ ++ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) ++ break; ++ /* fall through */ ++ default: ++ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) ++ break; ++ + dev_err(dev, "cannot locate _EVT method\n"); + return AE_ERROR; + } +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 915650bf519f..2527938a30b5 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -919,12 +919,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) + + if (buffer.length && package + && package->type == ACPI_TYPE_PACKAGE +- && package->package.count) { +- int err = acpi_extract_power_resources(package, 0, +- &ps->resources); +- if (!err) +- device->power.flags.power_resources = 1; +- } ++ && package->package.count) ++ acpi_extract_power_resources(package, 0, &ps->resources); ++ + ACPI_FREE(buffer.pointer); + } + +@@ -971,14 +968,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) + acpi_bus_init_power_state(device, i); + + INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); +- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) +- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + +- /* Set defaults for D0 and D3hot states (always valid) */ ++ /* Set the defaults for D0 and D3hot (always supported). */ + device->power.states[ACPI_STATE_D0].flags.valid = 1; + device->power.states[ACPI_STATE_D0].power = 100; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + ++ /* ++ * Use power resources only if the D0 list of them is populated, because ++ * some platforms may provide _PR3 only to indicate D3cold support and ++ * in those cases the power resources list returned by it may be bogus. ++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { ++ device->power.flags.power_resources = 1; ++ /* ++ * D3cold is supported if the D3hot list of power resources is ++ * not empty. 
++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) ++ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; ++ } ++ + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + } +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index c60d2c6d31d6..3a89909b50a6 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, + + error = kobject_init_and_add(&hotplug->kobj, + &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); +- if (error) ++ if (error) { ++ kobject_put(&hotplug->kobj); + goto err_out; ++ } + + kobject_uevent(&hotplug->kobj, KOBJ_ADD); + return; +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index f19a03b62365..ac97a1e2e5dd 100644 +--- a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -2902,17 +2902,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx, + (unsigned long long) current_req->cmd_flags)) + return BLK_STS_IOERR; + +- spin_lock_irq(&floppy_lock); +- list_add_tail(&bd->rq->queuelist, &floppy_reqs); +- spin_unlock_irq(&floppy_lock); +- + if (test_and_set_bit(0, &fdc_busy)) { + /* fdc busy, this new request will be treated when the + current one is done */ + is_alive(__func__, "old request running"); +- return BLK_STS_OK; ++ return BLK_STS_RESOURCE; + } + ++ spin_lock_irq(&floppy_lock); ++ list_add_tail(&bd->rq->queuelist, &floppy_reqs); ++ spin_unlock_irq(&floppy_lock); ++ + command_status = FD_COMMAND_NONE; + __reschedule_timeout(MAXTIMEOUT, "fd_request"); + set_fdc(0); +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index c6271ce250b3..b161bdf60000 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr, + unsigned int flags) + { + intel_private.driver->write_entry(addr, pg, flags); ++ readl(intel_private.gtt + pg); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, + j++; + } + } +- wmb(); ++ readl(intel_private.gtt + j - 1); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void) + + static void i9xx_chipset_flush(void) + { ++ wmb(); + if (intel_private.i9xx_flush_page) + writel(1, intel_private.i9xx_flush_page); + } +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 9728d1282e43..36e9f38a3882 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core) + return 0; + + ret = pm_runtime_get_sync(core->dev); +- return ret < 0 ? 
ret : 0; ++ if (ret < 0) { ++ pm_runtime_put_noidle(core->dev); ++ return ret; ++ } ++ return 0; + } + + static void clk_pm_runtime_put(struct clk_core *core) +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 35f8e098e9fa..fa988bd1e606 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -2507,26 +2507,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits); + static int cpufreq_boost_set_sw(int state) + { + struct cpufreq_policy *policy; +- int ret = -EINVAL; + + for_each_active_policy(policy) { ++ int ret; ++ + if (!policy->freq_table) +- continue; ++ return -ENXIO; + + ret = cpufreq_frequency_table_cpuinfo(policy, + policy->freq_table); + if (ret) { + pr_err("%s: Policy frequency update failed\n", + __func__); +- break; ++ return ret; + } + + ret = freq_qos_update_request(policy->max_freq_req, policy->max); + if (ret < 0) +- break; ++ return ret; + } + +- return ret; ++ return 0; + } + + int cpufreq_boost_trigger_state(int state) +diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c +index c4632d84c9a1..637be2f903d3 100644 +--- a/drivers/crypto/cavium/nitrox/nitrox_main.c ++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c +@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) + + struct nitrox_device *nitrox_get_first_device(void) + { +- struct nitrox_device *ndev = NULL; ++ struct nitrox_device *ndev; + + mutex_lock(&devlist_lock); + list_for_each_entry(ndev, &ndevlist, list) { +@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void) + break; + } + mutex_unlock(&devlist_lock); +- if (!ndev) ++ if (&ndev->list == &ndevlist) + return NULL; + + refcount_inc(&ndev->refcnt); +diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c +index 82b316b2f537..ac420b201dd8 100644 +--- a/drivers/crypto/virtio/virtio_crypto_algs.c ++++ b/drivers/crypto/virtio/virtio_crypto_algs.c +@@ -353,13 +353,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + int err; + unsigned long flags; + struct scatterlist outhdr, iv_sg, status_sg, **sgs; +- int i; + u64 dst_len; + unsigned int num_out = 0, num_in = 0; + int sg_total; + uint8_t *iv; ++ struct scatterlist *sg; + + src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (src_nents < 0) { ++ pr_err("Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ + dst_nents = sg_nents(req->dst); + + pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", +@@ -405,6 +410,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + goto free; + } + ++ dst_len = min_t(unsigned int, req->nbytes, dst_len); + pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", + req->nbytes, dst_len); + +@@ -445,12 +451,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + vc_sym_req->iv = iv; + + /* Source data */ +- for (i = 0; i < src_nents; i++) +- sgs[num_out++] = &req->src[i]; ++ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) ++ sgs[num_out++] = sg; + + /* Destination data */ +- for (i = 0; i < dst_nents; i++) +- sgs[num_out + num_in++] = &req->dst[i]; ++ for (sg = req->dst; sg; sg = sg_next(sg)) ++ sgs[num_out + num_in++] = sg; + + /* Status */ + sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); +@@ -580,10 +586,11 @@ static void virtio_crypto_ablkcipher_finalize_req( + scatterwalk_map_and_copy(req->info, req->dst, + req->nbytes - AES_BLOCK_SIZE, + 
AES_BLOCK_SIZE, 0); +- crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, +- req, err); + kzfree(vc_sym_req->iv); + virtcrypto_clear_request(&vc_sym_req->base); ++ ++ crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, ++ req, err); + } + + static struct virtio_crypto_algo virtio_crypto_algs[] = { { +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c +index c370d5457e6b..c0c5b6ecdb2e 100644 +--- a/drivers/edac/i10nm_base.c ++++ b/drivers/edac/i10nm_base.c +@@ -162,7 +162,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci) + mtr, mcddrtcfg, imc->mc, i, j); + + if (IS_DIMM_PRESENT(mtr)) +- ndimms += skx_get_dimm_info(mtr, 0, dimm, ++ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm, + imc, i, j); + else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, +diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c +index 0fcf3785e8f3..77cd370bd62f 100644 +--- a/drivers/edac/skx_base.c ++++ b/drivers/edac/skx_base.c +@@ -151,27 +151,23 @@ static const struct x86_cpu_id skx_cpuids[] = { + }; + MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); + +-#define SKX_GET_MTMTR(dev, reg) \ +- pci_read_config_dword((dev), 0x87c, &(reg)) +- +-static bool skx_check_ecc(struct pci_dev *pdev) ++static bool skx_check_ecc(u32 mcmtr) + { +- u32 mtmtr; +- +- SKX_GET_MTMTR(pdev, mtmtr); +- +- return !!GET_BITFIELD(mtmtr, 2, 2); ++ return !!GET_BITFIELD(mcmtr, 2, 2); + } + + static int skx_get_dimm_config(struct mem_ctl_info *mci) + { + struct skx_pvt *pvt = mci->pvt_info; ++ u32 mtr, mcmtr, amap, mcddrtcfg; + struct skx_imc *imc = pvt->imc; +- u32 mtr, amap, mcddrtcfg; + struct dimm_info *dimm; + int i, j; + int ndimms; + ++ /* Only the mcmtr on the first channel is effective */ ++ pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr); ++ + for (i = 0; i < SKX_NUM_CHANNELS; i++) { + ndimms = 0; + pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); +@@ -182,14 +178,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) + pci_read_config_dword(imc->chan[i].cdev, + 0x80 + 4 * j, &mtr); + if (IS_DIMM_PRESENT(mtr)) { +- ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j); ++ ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j); + } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) { + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, + EDAC_MOD_STR); + nvdimm_count++; + } + } +- if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { ++ if (ndimms && !skx_check_ecc(mcmtr)) { + skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); + return -ENODEV; + } +diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c +index a04349c6d17e..2177ad765bd1 100644 +--- a/drivers/edac/skx_common.c ++++ b/drivers/edac/skx_common.c +@@ -283,7 +283,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, + #define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows") + #define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols") + +-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, ++int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno) + { + int banks = 16, ranks, rows, cols, npages; +@@ -303,8 +303,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, + imc->mc, chan, dimmno, size, npages, + banks, 1 << ranks, rows, cols); + +- imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); +- imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); ++ 
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0); ++ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9); + imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); + imc->chan[chan].dimms[dimmno].rowbits = rows; + imc->chan[chan].dimms[dimmno].colbits = cols; +diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h +index 08cc971a50ea..fed337c12954 100644 +--- a/drivers/edac/skx_common.h ++++ b/drivers/edac/skx_common.h +@@ -126,7 +126,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type, + + int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm); + +-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, ++int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno); + + int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index aff3dfb4d7ba..d187585db97a 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + NULL, "%s", short_name); + kfree(short_name); +- if (ret) ++ if (ret) { ++ kobject_put(&new_var->kobj); + return ret; ++ } + + kobject_uevent(&new_var->kobj, KOBJ_ADD); + if (efivar_entry_add(new_var, &efivar_sysfs_list)) { +diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c +index 35a5f8f8eea5..e48d971ffb61 100644 +--- a/drivers/firmware/imx/imx-scu.c ++++ b/drivers/firmware/imx/imx-scu.c +@@ -38,6 +38,7 @@ struct imx_sc_ipc { + struct device *dev; + struct mutex lock; + struct completion done; ++ bool fast_ipc; + + /* temporarily store the SCU msg */ + u32 *msg; +@@ -115,6 +116,26 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) + struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc; + struct imx_sc_rpc_msg *hdr; + u32 *data = msg; ++ int i; ++ ++ if (!sc_ipc->msg) { ++ dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n", ++ sc_chan->idx, *data); ++ return; ++ } ++ ++ if (sc_ipc->fast_ipc) { ++ hdr = msg; ++ sc_ipc->rx_size = hdr->size; ++ sc_ipc->msg[0] = *data++; ++ ++ for (i = 1; i < sc_ipc->rx_size; i++) ++ sc_ipc->msg[i] = *data++; ++ ++ complete(&sc_ipc->done); ++ ++ return; ++ } + + if (sc_chan->idx == 0) { + hdr = msg; +@@ -137,20 +158,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) + + static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) + { +- struct imx_sc_rpc_msg *hdr = msg; ++ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg; + struct imx_sc_chan *sc_chan; + u32 *data = msg; + int ret; ++ int size; + int i; + + /* Check size */ +- if (hdr->size > IMX_SC_RPC_MAX_MSG) ++ if (hdr.size > IMX_SC_RPC_MAX_MSG) + return -EINVAL; + +- dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc, +- hdr->func, hdr->size); ++ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc, ++ hdr.func, hdr.size); + +- for (i = 0; i < hdr->size; i++) { ++ size = sc_ipc->fast_ipc ? 1 : hdr.size; ++ for (i = 0; i < size; i++) { + sc_chan = &sc_ipc->chans[i % 4]; + + /* +@@ -162,8 +185,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) + * Wait for tx_done before every send to ensure that no + * queueing happens at the mailbox channel level. 
+ */ +- wait_for_completion(&sc_chan->tx_done); +- reinit_completion(&sc_chan->tx_done); ++ if (!sc_ipc->fast_ipc) { ++ wait_for_completion(&sc_chan->tx_done); ++ reinit_completion(&sc_chan->tx_done); ++ } + + ret = mbox_send_message(sc_chan->ch, &data[i]); + if (ret < 0) +@@ -187,7 +212,8 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) + mutex_lock(&sc_ipc->lock); + reinit_completion(&sc_ipc->done); + +- sc_ipc->msg = msg; ++ if (have_resp) ++ sc_ipc->msg = msg; + sc_ipc->count = 0; + ret = imx_scu_ipc_write(sc_ipc, msg); + if (ret < 0) { +@@ -209,6 +235,7 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) + } + + out: ++ sc_ipc->msg = NULL; + mutex_unlock(&sc_ipc->lock); + + dev_dbg(sc_ipc->dev, "RPC SVC done\n"); +@@ -224,6 +251,8 @@ static int imx_scu_probe(struct platform_device *pdev) + struct imx_sc_chan *sc_chan; + struct mbox_client *cl; + char *chan_name; ++ struct of_phandle_args args; ++ int num_channel; + int ret; + int i; + +@@ -231,11 +260,20 @@ static int imx_scu_probe(struct platform_device *pdev) + if (!sc_ipc) + return -ENOMEM; + +- for (i = 0; i < SCU_MU_CHAN_NUM; i++) { +- if (i < 4) ++ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes", ++ "#mbox-cells", 0, &args); ++ if (ret) ++ return ret; ++ ++ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); ++ ++ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM; ++ for (i = 0; i < num_channel; i++) { ++ if (i < num_channel / 2) + chan_name = kasprintf(GFP_KERNEL, "tx%d", i); + else +- chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4); ++ chan_name = kasprintf(GFP_KERNEL, "rx%d", ++ i - num_channel / 2); + + if (!chan_name) + return -ENOMEM; +@@ -247,13 +285,15 @@ static int imx_scu_probe(struct platform_device *pdev) + cl->knows_txdone = true; + cl->rx_callback = imx_scu_rx_callback; + +- /* Initial tx_done completion as "done" */ +- cl->tx_done = imx_scu_tx_done; +- init_completion(&sc_chan->tx_done); +- complete(&sc_chan->tx_done); ++ if (!sc_ipc->fast_ipc) { ++ /* Initial tx_done completion as "done" */ ++ cl->tx_done = imx_scu_tx_done; ++ init_completion(&sc_chan->tx_done); ++ complete(&sc_chan->tx_done); ++ } + + sc_chan->sc_ipc = sc_ipc; +- sc_chan->idx = i % 4; ++ sc_chan->idx = i % (num_channel / 2); + sc_chan->ch = mbox_request_channel_byname(cl, chan_name); + if (IS_ERR(sc_chan->ch)) { + ret = PTR_ERR(sc_chan->ch); +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +index 968d9b2705d0..6d0cc90401c0 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +@@ -619,6 +619,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) + GFP_KERNEL | + __GFP_NORETRY | + __GFP_NOWARN); ++ /* ++ * Using __get_user_pages_fast() with a read-only ++ * access is questionable. A read-only page may be ++ * COW-broken, and then this might end up giving ++ * the wrong side of the COW.. ++ * ++ * We may or may not care. 
++ */ + if (pvec) /* defer to worker if malloc fails */ + pinned = __get_user_pages_fast(obj->userptr.ptr, + num_pages, +diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h +index 5a95100fa18b..03b05c54722d 100644 +--- a/drivers/gpu/drm/vkms/vkms_drv.h ++++ b/drivers/gpu/drm/vkms/vkms_drv.h +@@ -121,11 +121,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, + enum drm_plane_type type, int index); + + /* Gem stuff */ +-struct drm_gem_object *vkms_gem_create(struct drm_device *dev, +- struct drm_file *file, +- u32 *handle, +- u64 size); +- + vm_fault_t vkms_gem_fault(struct vm_fault *vmf); + + int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, +diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c +index 6489bfe0a149..8ba8b87d0c99 100644 +--- a/drivers/gpu/drm/vkms/vkms_gem.c ++++ b/drivers/gpu/drm/vkms/vkms_gem.c +@@ -95,10 +95,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf) + return ret; + } + +-struct drm_gem_object *vkms_gem_create(struct drm_device *dev, +- struct drm_file *file, +- u32 *handle, +- u64 size) ++static struct drm_gem_object *vkms_gem_create(struct drm_device *dev, ++ struct drm_file *file, ++ u32 *handle, ++ u64 size) + { + struct vkms_gem_object *obj; + int ret; +@@ -111,7 +111,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, + return ERR_CAST(obj); + + ret = drm_gem_handle_create(file, &obj->gem, handle); +- drm_gem_object_put_unlocked(&obj->gem); + if (ret) + return ERR_PTR(ret); + +@@ -140,6 +139,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, + args->size = gem_obj->size; + args->pitch = pitch; + ++ drm_gem_object_put_unlocked(gem_obj); ++ + DRM_DEBUG_DRIVER("Created object of size %lld\n", size); + + return 0; +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index f2a2d1246c19..adb08c3fc085 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -307,6 +307,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, + spin_lock_irq(&ev_queue->lock); + if (!list_empty(&ev_queue->event_list)) + pollflags = EPOLLIN | EPOLLRDNORM; ++ else if (ev_queue->is_closed) ++ pollflags = EPOLLERR; + spin_unlock_irq(&ev_queue->lock); + + return pollflags; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 4d2036209b45..758dae8d6500 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { + "LEN005b", /* P50 */ + "LEN005e", /* T560 */ + "LEN006c", /* T470s */ ++ "LEN007a", /* T470s */ + "LEN0071", /* T480 */ + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ + "LEN0073", /* X1 Carbon G5 (Elantech) */ +diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c +index a5ab774da4cc..fca908ba4841 100644 +--- a/drivers/input/touchscreen/mms114.c ++++ b/drivers/input/touchscreen/mms114.c +@@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg, + if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) + BUG(); + +- /* Write register: use repeated start */ ++ /* Write register */ + xfer[0].addr = client->addr; +- xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; ++ xfer[0].flags = client->flags & I2C_M_TEN; + xfer[0].len = 1; + xfer[0].buf = &buf; + + /* Read data */ + xfer[1].addr = client->addr; +- xfer[1].flags = I2C_M_RD; ++ 
xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; + xfer[1].len = len; + xfer[1].buf = val; + +@@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client, + const void *match_data; + int error; + +- if (!i2c_check_functionality(client->adapter, +- I2C_FUNC_PROTOCOL_MANGLING)) { +- dev_err(&client->dev, +- "Need i2c bus that supports protocol mangling\n"); ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { ++ dev_err(&client->dev, "Not supported I2C adapter\n"); + return -ENODEV; + } + +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c +index ebb387aa5158..20eed28ea60d 100644 +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -584,7 +584,7 @@ try_again: + */ + err = mmc_send_io_op_cond(host, ocr, &rocr); + if (err) +- goto err; ++ return err; + + /* + * For SPI, enable CRC as appropriate. +@@ -592,17 +592,15 @@ try_again: + if (mmc_host_is_spi(host)) { + err = mmc_spi_set_crc(host, use_spi_crc); + if (err) +- goto err; ++ return err; + } + + /* + * Allocate card structure. + */ + card = mmc_alloc_card(host, NULL); +- if (IS_ERR(card)) { +- err = PTR_ERR(card); +- goto err; +- } ++ if (IS_ERR(card)) ++ return PTR_ERR(card); + + if ((rocr & R4_MEMORY_PRESENT) && + mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { +@@ -610,19 +608,15 @@ try_again: + + if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || + memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) { +- mmc_remove_card(card); +- pr_debug("%s: Perhaps the card was replaced\n", +- mmc_hostname(host)); +- return -ENOENT; ++ err = -ENOENT; ++ goto mismatch; + } + } else { + card->type = MMC_TYPE_SDIO; + + if (oldcard && oldcard->type != MMC_TYPE_SDIO) { +- mmc_remove_card(card); +- pr_debug("%s: Perhaps the card was replaced\n", +- mmc_hostname(host)); +- return -ENOENT; ++ err = -ENOENT; ++ goto mismatch; + } + } + +@@ -677,7 +671,7 @@ try_again: + if (!oldcard && card->type == MMC_TYPE_SD_COMBO) { + err = mmc_sd_get_csd(host, card); + if (err) +- return err; ++ goto remove; + + mmc_decode_cid(card); + } +@@ -704,7 +698,12 @@ try_again: + mmc_set_timing(card->host, MMC_TIMING_SD_HS); + } + +- goto finish; ++ if (oldcard) ++ mmc_remove_card(card); ++ else ++ host->card = card; ++ ++ return 0; + } + + /* +@@ -718,9 +717,8 @@ try_again: + /* Retry init sequence, but without R4_18V_PRESENT. 
*/ + retries = 0; + goto try_again; +- } else { +- goto remove; + } ++ return err; + } + + /* +@@ -731,16 +729,14 @@ try_again: + goto remove; + + if (oldcard) { +- int same = (card->cis.vendor == oldcard->cis.vendor && +- card->cis.device == oldcard->cis.device); +- mmc_remove_card(card); +- if (!same) { +- pr_debug("%s: Perhaps the card was replaced\n", +- mmc_hostname(host)); +- return -ENOENT; ++ if (card->cis.vendor == oldcard->cis.vendor && ++ card->cis.device == oldcard->cis.device) { ++ mmc_remove_card(card); ++ card = oldcard; ++ } else { ++ err = -ENOENT; ++ goto mismatch; + } +- +- card = oldcard; + } + card->ocr = ocr_card; + mmc_fixup_device(card, sdio_fixup_methods); +@@ -801,16 +797,15 @@ try_again: + err = -EINVAL; + goto remove; + } +-finish: +- if (!oldcard) +- host->card = card; ++ ++ host->card = card; + return 0; + ++mismatch: ++ pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host)); + remove: +- if (!oldcard) ++ if (oldcard != card) + mmc_remove_card(card); +- +-err: + return err; + } + +diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c +index 8e83ae6920ae..0953bd8a4f79 100644 +--- a/drivers/mmc/host/mmci_stm32_sdmmc.c ++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c +@@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) + static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) + { + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); ++ ++ if (!data->host_cookie) ++ sdmmc_idma_unprep_data(host, data, 0); + } + + static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired) +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 0148f8e6bb37..8b2a6a362c60 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -1112,6 +1112,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ + msm_host->use_cdr = true; + ++ /* ++ * Clear tuning_done flag before tuning to ensure proper ++ * HS400 settings. 
++ */ ++ msm_host->tuning_done = 0; ++ + /* + * For HS400 tuning in HS200 timing requires: + * - select MCLK/2 in VENDOR_SPEC +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c +index dec5a99f52cf..25083f010a7a 100644 +--- a/drivers/mmc/host/tmio_mmc_core.c ++++ b/drivers/mmc/host/tmio_mmc_core.c +@@ -1285,12 +1285,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) + cancel_work_sync(&host->done); + cancel_delayed_work_sync(&host->delayed_reset_work); + tmio_mmc_release_dma(host); ++ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + +- pm_runtime_dont_use_autosuspend(&pdev->dev); + if (host->native_hotplug) + pm_runtime_put_noidle(&pdev->dev); +- pm_runtime_put_sync(&pdev->dev); ++ + pm_runtime_disable(&pdev->dev); ++ pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); + } + EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); + +diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c +index 0c72ec5546c3..aec9c8ae694c 100644 +--- a/drivers/mmc/host/uniphier-sd.c ++++ b/drivers/mmc/host/uniphier-sd.c +@@ -614,11 +614,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) + } + } + +- ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, +- dev_name(dev), host); +- if (ret) +- goto free_host; +- + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + host->dma_ops = &uniphier_sd_internal_dma_ops; + else +@@ -646,8 +641,15 @@ static int uniphier_sd_probe(struct platform_device *pdev) + if (ret) + goto free_host; + ++ ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, ++ dev_name(dev), host); ++ if (ret) ++ goto remove_host; ++ + return 0; + ++remove_host: ++ tmio_mmc_host_remove(host); + free_host: + tmio_mmc_host_free(host); + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index aaa03ce5796f..5a42ddeecfe5 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -4536,12 +4536,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } +- dev_info(dev, "Partner protocol version is %d\n", +- crq->version_exchange_rsp.version); +- if (be16_to_cpu(crq->version_exchange_rsp.version) < +- ibmvnic_version) +- ibmvnic_version = ++ ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); ++ dev_info(dev, "Partner protocol version is %d\n", ++ ibmvnic_version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +index c28cbae42331..2c80205dc939 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +@@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) + mlx5e_close_cq(&c->xskicosq.cq); + mlx5e_close_xdpsq(&c->xsksq); + mlx5e_close_cq(&c->xsksq.cq); ++ ++ memset(&c->xskrq, 0, sizeof(c->xskrq)); ++ memset(&c->xsksq, 0, sizeof(c->xsksq)); ++ memset(&c->xskicosq, 0, sizeof(c->xskicosq)); + } + + void mlx5e_activate_xsk(struct mlx5e_channel *c) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c +index f63beb399837..f628887d8af8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c +@@ -193,15 +193,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) + + void mlx5_enter_error_state(struct 
mlx5_core_dev *dev, bool force) + { ++ bool err_detected = false; ++ ++ /* Mark the device as fatal in order to abort FW commands */ ++ if ((check_fatal_sensors(dev) || force) && ++ dev->state == MLX5_DEVICE_STATE_UP) { ++ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; ++ err_detected = true; ++ } + mutex_lock(&dev->intf_state_mutex); +- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) +- goto unlock; ++ if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) ++ goto unlock;/* a previous error is still being handled */ + if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + goto unlock; + } + +- if (check_fatal_sensors(dev) || force) { ++ if (check_fatal_sensors(dev) || force) { /* protected state setting */ + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + mlx5_cmd_flush(dev); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index e4a690128b3a..7c0a726277b0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -794,6 +794,11 @@ err_disable: + + static void mlx5_pci_close(struct mlx5_core_dev *dev) + { ++ /* health work might still be active, and it needs pci bar in ++ * order to know the NIC state. Therefore, drain the health WQ ++ * before removing the pci bars ++ */ ++ mlx5_drain_health_wq(dev); + iounmap(dev->iseg); + pci_clear_master(dev->pdev); + release_bar(dev->pdev); +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +index 35a1dc89c28a..71c90c8a9e94 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +@@ -390,8 +390,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, + static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, + int trip, enum thermal_trend *trend) + { +- struct mlxsw_thermal_module *tz = tzdev->devdata; +- struct mlxsw_thermal *thermal = tz->parent; ++ struct mlxsw_thermal *thermal = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; +@@ -592,6 +591,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, + return 0; + } + ++static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, ++ int trip, enum thermal_trend *trend) ++{ ++ struct mlxsw_thermal_module *tz = tzdev->devdata; ++ struct mlxsw_thermal *thermal = tz->parent; ++ ++ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) ++ return -EINVAL; ++ ++ if (tzdev == thermal->tz_highest_dev) ++ return 1; ++ ++ *trend = THERMAL_TREND_STABLE; ++ return 0; ++} ++ + static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { + .bind = mlxsw_thermal_module_bind, + .unbind = mlxsw_thermal_module_unbind, +@@ -603,7 +618,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { + .set_trip_temp = mlxsw_thermal_module_trip_temp_set, + .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, + .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, +- .get_trend = mlxsw_thermal_trend_get, ++ .get_trend = mlxsw_thermal_module_trend_get, + }; + + static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, +@@ -642,7 +657,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { + .set_trip_temp = mlxsw_thermal_module_trip_temp_set, + .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, + .set_trip_hyst = 
mlxsw_thermal_module_trip_hyst_set, +- .get_trend = mlxsw_thermal_trend_get, ++ .get_trend = mlxsw_thermal_module_trend_get, + }; + + static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, +diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c +index b16a1221d19b..fb182bec8f06 100644 +--- a/drivers/net/net_failover.c ++++ b/drivers/net/net_failover.c +@@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev) + return 0; + + err_standby_open: +- dev_close(primary_dev); ++ if (primary_dev) ++ dev_close(primary_dev); + err_primary_open: + netif_tx_disable(dev); + return err; +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 6e9a59e3d822..46bdd0df2eb8 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1908,8 +1908,11 @@ drop: + skb->dev = tun->dev; + break; + case IFF_TAP: +- if (!frags) +- skb->protocol = eth_type_trans(skb, tun->dev); ++ if (frags && !pskb_may_pull(skb, ETH_HLEN)) { ++ err = -ENOMEM; ++ goto drop; ++ } ++ skb->protocol = eth_type_trans(skb, tun->dev); + break; + } + +@@ -1966,9 +1969,12 @@ drop: + } + + if (frags) { ++ u32 headlen; ++ + /* Exercise flow dissector code path. */ +- u32 headlen = eth_get_headlen(tun->dev, skb->data, +- skb_headlen(skb)); ++ skb_push(skb, ETH_HLEN); ++ headlen = eth_get_headlen(tun->dev, skb->data, ++ skb_headlen(skb)); + + if (unlikely(headlen > skb_headlen(skb))) { + this_cpu_inc(tun->pcpu_stats->rx_dropped); +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index ae59fca96032..03434db36b5c 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1924,6 +1924,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, + ns_olen = request->len - skb_network_offset(request) - + sizeof(struct ipv6hdr) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { ++ if (!ns->opt[i + 1]) { ++ kfree_skb(reply); ++ return NULL; ++ } + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c +index dd0c32379375..4ed21dad6a8e 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c +@@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, + hif_dev->remain_skb = nskb; + spin_unlock(&hif_dev->rx_lock); + } else { ++ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { ++ dev_err(&hif_dev->udev->dev, ++ "ath9k_htc: over RX MAX_PKT_NUM\n"); ++ goto err; ++ } + nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); + if (!nskb) { + dev_err(&hif_dev->udev->dev, +@@ -638,9 +643,9 @@ err: + + static void ath9k_hif_usb_rx_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + int ret; + + if (!skb) +@@ -680,14 +685,15 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + } + + static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + struct sk_buff *nskb; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int ret; + + if (!skb) +@@ -745,6 
+751,7 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + urb->context = NULL; + } + +@@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) + init_usb_anchor(&hif_dev->mgmt_submitted); + + for (i = 0; i < MAX_TX_URB_NUM; i++) { +- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); ++ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); + if (!tx_buf) + goto err; + +@@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->rx_submitted); +@@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + + for (i = 0; i < MAX_RX_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_bulk_urb(urb, hif_dev->udev, + usb_rcvbulkpipe(hif_dev->udev, + USB_WLAN_RX_PIPE), + skb->data, MAX_RX_BUF_SIZE, +- ath9k_hif_usb_rx_cb, skb); ++ ath9k_hif_usb_rx_cb, rx_buf); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->rx_submitted); +@@ -880,6 +897,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_rx_urbs(hif_dev); + return ret; + } +@@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->reg_in_submitted); + + for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, + USB_REG_IN_PIPE), + skb->data, MAX_REG_IN_BUF_SIZE, +- ath9k_hif_usb_reg_in_cb, skb, 1); ++ ath9k_hif_usb_reg_in_cb, rx_buf, 1); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->reg_in_submitted); +@@ -943,6 +972,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); + return ret; + } +@@ -973,7 +1004,7 @@ err: + return -ENOMEM; + } + +-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) + { + usb_kill_anchored_urbs(&hif_dev->regout_submitted); + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); +@@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) + + if (hif_dev->flags & HIF_USB_READY) { + ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); +- ath9k_htc_hw_free(hif_dev->htc_handle); + ath9k_hif_usb_dev_deinit(hif_dev); ++ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); ++ 
ath9k_htc_hw_free(hif_dev->htc_handle); + } + + usb_set_intfdata(interface, NULL); +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h +index 7846916aa01d..5985aa15ca93 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.h ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h +@@ -86,6 +86,11 @@ struct tx_buf { + struct list_head list; + }; + ++struct rx_buf { ++ struct sk_buff *skb; ++ struct hif_device_usb *hif_dev; ++}; ++ + #define HIF_USB_TX_STOP BIT(0) + #define HIF_USB_TX_FLUSH BIT(1) + +@@ -133,5 +138,6 @@ struct hif_device_usb { + + int ath9k_hif_usb_init(void); + void ath9k_hif_usb_exit(void); ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); + + #endif /* HTC_USB_H */ +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +index d961095ab01f..40a065028ebe 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +@@ -931,8 +931,9 @@ err_init: + int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + u16 devid, char *product, u32 drv_info) + { +- struct ieee80211_hw *hw; ++ struct hif_device_usb *hif_dev; + struct ath9k_htc_priv *priv; ++ struct ieee80211_hw *hw; + int ret; + + hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); +@@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + return 0; + + err_init: +- ath9k_deinit_wmi(priv); ++ ath9k_stop_wmi(priv); ++ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; ++ ath9k_hif_usb_dealloc_urbs(hif_dev); ++ ath9k_destoy_wmi(priv); + err_free: + ieee80211_free_hw(hw); + return ret; +@@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) + htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; + + ath9k_deinit_device(htc_handle->drv_priv); +- ath9k_deinit_wmi(htc_handle->drv_priv); ++ ath9k_stop_wmi(htc_handle->drv_priv); + ieee80211_free_hw(htc_handle->drv_priv->hw); + } + } +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +index 9cec5c216e1f..118e5550b10c 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { +- ath_warn(common, +- "Short RX data len, dropping (dlen: %d)\n", +- rs_datalen); ++ ath_dbg(common, ANY, ++ "Short RX data len, dropping (dlen: %d)\n", ++ rs_datalen); + goto rx_next; + } + +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c +index d091c8ebdcf0..d2e062eaf561 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c +@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target, + + if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { + epid = svc_rspmsg->endpoint_id; ++ if (epid < 0 || epid >= ENDPOINT_MAX) ++ return; ++ + service_id = be16_to_cpu(svc_rspmsg->service_id); + max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); + endpoint = &target->endpoint[epid]; +@@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target) + time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); + if (!time_left) { + dev_err(target->dev, "HTC credit config timeout\n"); 
+- kfree_skb(skb); + return -ETIMEDOUT; + } + +@@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target) + time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); + if (!time_left) { + dev_err(target->dev, "HTC start timeout\n"); +- kfree_skb(skb); + return -ETIMEDOUT; + } + +@@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target, + if (!time_left) { + dev_err(target->dev, "Service connection timeout for: %d\n", + service_connreq->service_id); +- kfree_skb(skb); + return -ETIMEDOUT; + } + +diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c +index cdc146091194..e7a3127395be 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.c ++++ b/drivers/net/wireless/ath/ath9k/wmi.c +@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) + return wmi; + } + +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv) + { + struct wmi *wmi = priv->wmi; + + mutex_lock(&wmi->op_mutex); + wmi->stopped = true; + mutex_unlock(&wmi->op_mutex); ++} + ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) ++{ + kfree(priv->wmi); + } + +@@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", + wmi_cmd_to_name(cmd_id)); + mutex_unlock(&wmi->op_mutex); +- kfree_skb(skb); + return -ETIMEDOUT; + } + +diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h +index 380175d5ecd7..d8b912206232 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.h ++++ b/drivers/net/wireless/ath/ath9k/wmi.h +@@ -179,7 +179,6 @@ struct wmi { + }; + + struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); + int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, + enum htc_endpoint_id *wmi_ctrl_epid); + int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, +@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + void ath9k_wmi_event_tasklet(unsigned long data); + void ath9k_fatal_work(struct work_struct *work); + void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv); ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); + + #define WMI_CMD(_wmi_cmd) \ + do { \ +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +index ed367b0a185c..f49887379c43 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +@@ -281,7 +281,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) + int regulatory_type; + + /* Checking for required sections */ +- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { ++ if (mvm->trans->cfg->nvm_type == IWL_NVM) { + if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || + !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { + IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 779132aef0fb..c73e8095a849 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -4621,10 +4621,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, + + /* + * Some controllers might not implement link active reporting. In this +- * case, we wait for 1000 + 100 ms. ++ * case, we wait for 1000 ms + any delay requested by the caller. 
+ */ + if (!pdev->link_active_reporting) { +- msleep(1100); ++ msleep(timeout + delay); + return true; + } + +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index 010f541a5002..0896b3614eb1 100644 +--- a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -511,7 +511,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, + + /* Initialise vdev subdevice */ + snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); +- rvdev->dev.parent = rproc->dev.parent; ++ rvdev->dev.parent = &rproc->dev; + rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset; + rvdev->dev.release = rproc_rvdev_release; + dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); +diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c +index 31a62a0b470e..380d52672035 100644 +--- a/drivers/remoteproc/remoteproc_virtio.c ++++ b/drivers/remoteproc/remoteproc_virtio.c +@@ -375,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) + goto out; + } + } ++ } else { ++ struct device_node *np = rproc->dev.parent->of_node; ++ ++ /* ++ * If we don't have dedicated buffer, just attempt to re-assign ++ * the reserved memory from our parent. A default memory-region ++ * at index 0 from the parent's memory-regions is assigned for ++ * the rvdev dev to allocate from. Failure is non-critical and ++ * the allocations will fall back to global pools, so don't ++ * check return value either. ++ */ ++ of_reserved_mem_device_init_by_idx(dev, np, 0); + } + + /* Allocate virtio device */ +diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c +index 85f77c1ed23c..4a09f21cb235 100644 +--- a/drivers/scsi/lpfc/lpfc_ct.c ++++ b/drivers/scsi/lpfc/lpfc_ct.c +@@ -462,7 +462,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) + struct lpfc_nodelist *ndlp; + + if ((vport->port_type != LPFC_NPIV_PORT) || +- (fc4_type == FC_TYPE_FCP) || + !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { + + ndlp = lpfc_setup_disc_node(vport, Did); +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index d86838801805..3d48024082ba 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -4227,6 +4227,7 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; ++ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; + u16 smid; + bool refire_cmd = 0; + u8 result; +@@ -4284,6 +4285,11 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) + break; + } + ++ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) ++ cmd_fusion->io_request; ++ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) ++ result = RETURN_CMD; ++ + switch (result) { + case REFIRE_CMD: + megasas_fire_cmd_fusion(instance, req_desc); +@@ -4481,7 +4487,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, + if (!timeleft) { + dev_err(&instance->pdev->dev, + "task mgmt type 0x%x timed out\n", type); +- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c +index 7a3531856491..d0afe0b1599f 100644 
+--- a/drivers/spi/spi-bcm-qspi.c ++++ b/drivers/spi/spi-bcm-qspi.c +@@ -670,7 +670,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + if (buf) + buf[tp.byte] = read_rxram_slot_u8(qspi, slot); + dev_dbg(&qspi->pdev->dev, "RD %02x\n", +- buf ? buf[tp.byte] : 0xff); ++ buf ? buf[tp.byte] : 0x0); + } else { + u16 *buf = tp.trans->rx_buf; + +@@ -678,7 +678,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + buf[tp.byte / 2] = read_rxram_slot_u16(qspi, + slot); + dev_dbg(&qspi->pdev->dev, "RD %04x\n", +- buf ? buf[tp.byte] : 0xffff); ++ buf ? buf[tp.byte / 2] : 0x0); + } + + update_qspi_trans_byte_count(qspi, &tp, +@@ -733,13 +733,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) + while (!tstatus && slot < MSPI_NUM_CDRAM) { + if (tp.trans->bits_per_word <= 8) { + const u8 *buf = tp.trans->tx_buf; +- u8 val = buf ? buf[tp.byte] : 0xff; ++ u8 val = buf ? buf[tp.byte] : 0x00; + + write_txram_slot_u8(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); + } else { + const u16 *buf = tp.trans->tx_buf; +- u16 val = buf ? buf[tp.byte / 2] : 0xffff; ++ u16 val = buf ? buf[tp.byte / 2] : 0x0000; + + write_txram_slot_u16(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); +@@ -1220,6 +1220,11 @@ int bcm_qspi_probe(struct platform_device *pdev, + } + + qspi = spi_master_get_devdata(master); ++ ++ qspi->clk = devm_clk_get_optional(&pdev->dev, NULL); ++ if (IS_ERR(qspi->clk)) ++ return PTR_ERR(qspi->clk); ++ + qspi->pdev = pdev; + qspi->trans_pos.trans = NULL; + qspi->trans_pos.byte = 0; +@@ -1332,13 +1337,6 @@ int bcm_qspi_probe(struct platform_device *pdev, + qspi->soc_intc = NULL; + } + +- qspi->clk = devm_clk_get(&pdev->dev, NULL); +- if (IS_ERR(qspi->clk)) { +- dev_warn(dev, "unable to get clock\n"); +- ret = PTR_ERR(qspi->clk); +- goto qspi_probe_err; +- } +- + ret = clk_prepare_enable(qspi->clk); + if (ret) { + dev_err(dev, "failed to prepare clock\n"); +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c +index b4070c0de3df..c88f5d99c906 100644 +--- a/drivers/spi/spi-bcm2835.c ++++ b/drivers/spi/spi-bcm2835.c +@@ -1330,7 +1330,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_controller(&pdev->dev, ctlr); ++ err = spi_register_controller(ctlr); + if (err) { + dev_err(&pdev->dev, "could not register SPI controller: %d\n", + err); +@@ -1355,6 +1355,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) + + bcm2835_debugfs_remove(bs); + ++ spi_unregister_controller(ctlr); ++ + /* Clear FIFOs, and disable the HW block */ + bcm2835_wr(bs, BCM2835_SPI_CS, + BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c +index a2162ff56a12..c331efd6e86b 100644 +--- a/drivers/spi/spi-bcm2835aux.c ++++ b/drivers/spi/spi-bcm2835aux.c +@@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) + + bcm2835aux_debugfs_remove(bs); + ++ spi_unregister_master(master); ++ + bcm2835aux_spi_reset_hw(bs); + + /* disable the HW block by releasing the clock */ +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index d2ca3b357cfe..82c5c027ec4c 100644 
+--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -128,12 +128,20 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) + { + struct dw_spi *dws = spi_controller_get_devdata(spi->controller); + struct chip_data *chip = spi_get_ctldata(spi); ++ bool cs_high = !!(spi->mode & SPI_CS_HIGH); + + /* Chip select logic is inverted from spi_set_cs() */ + if (chip && chip->cs_control) + chip->cs_control(!enable); + +- if (!enable) ++ /* ++ * DW SPI controller demands any native CS being set in order to ++ * proceed with data transfer. So in order to activate the SPI ++ * communications we must set a corresponding bit in the Slave ++ * Enable register no matter whether the SPI core is configured to ++ * support active-high or active-low CS level. ++ */ ++ if (cs_high == enable) + dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); + else if (dws->cs_override) + dw_writel(dws, DW_SPI_SER, 0); +@@ -524,7 +532,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + } + } + +- ret = devm_spi_register_controller(dev, master); ++ ret = spi_register_controller(master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_dma_exit; +@@ -548,6 +556,8 @@ void dw_spi_remove_host(struct dw_spi *dws) + { + dw_spi_debugfs_remove(dws); + ++ spi_unregister_controller(dws->master); ++ + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); + +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 723145673206..d0d6f1bda1b6 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1880,7 +1880,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); +- status = devm_spi_register_controller(&pdev->dev, controller); ++ status = spi_register_controller(controller); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi controller\n"); + goto out_error_pm_runtime_enabled; +@@ -1889,7 +1889,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + return status; + + out_error_pm_runtime_enabled: +- pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + out_error_clock_enabled: +@@ -1916,6 +1915,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) + + pm_runtime_get_sync(&pdev->dev); + ++ spi_unregister_controller(drv_data->controller); ++ + /* Disable the SSP at the peripheral and SOC level */ + pxa2xx_spi_write(drv_data, SSCR0, 0); + clk_disable_unprepare(ssp->clk); +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index c186d3a944cd..6bfbf0cfcf63 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -2581,6 +2581,8 @@ void spi_unregister_controller(struct spi_controller *ctlr) + struct spi_controller *found; + int id = ctlr->bus_num; + ++ device_for_each_child(&ctlr->dev, NULL, __unregister); ++ + /* First make sure that this controller was ever added */ + mutex_lock(&board_lock); + found = idr_find(&spi_master_idr, id); +@@ -2593,7 +2595,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) + list_del(&ctlr->list); + mutex_unlock(&board_lock); + +- device_for_each_child(&ctlr->dev, NULL, __unregister); + device_unregister(&ctlr->dev); + /* free bus id */ + mutex_lock(&board_lock); +diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c +index be8d9702cbb2..4b84fd4483e1 100644 +--- a/drivers/video/fbdev/vt8500lcdfb.c ++++ b/drivers/video/fbdev/vt8500lcdfb.c +@@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct 
fb_info *info) + info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) + for (i = 0; i < 256; i++) + vt8500lcd_setcolreg(i, 0, 0, 0, 0, info); ++ fallthrough; + case FB_BLANK_UNBLANK: + if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || + info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) +diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c +index 3be07807edcd..e30f9427b335 100644 +--- a/drivers/video/fbdev/w100fb.c ++++ b/drivers/video/fbdev/w100fb.c +@@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + memsize=par->mach->mem->size; + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); + vfree(par->saved_extmem); ++ par->saved_extmem = NULL; + } + if (par->saved_intmem) { + memsize=MEM_INT_SIZE; +@@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + else + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); + vfree(par->saved_intmem); ++ par->saved_intmem = NULL; + } + } + +diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c +index 8ed89f032ebf..e0e62149a6f4 100644 +--- a/drivers/watchdog/imx_sc_wdt.c ++++ b/drivers/watchdog/imx_sc_wdt.c +@@ -177,6 +177,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) + wdog->timeout = DEFAULT_TIMEOUT; + + watchdog_init_timeout(wdog, 0, dev); ++ ++ ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout); ++ if (ret) ++ return ret; ++ + watchdog_stop_on_reboot(wdog); + watchdog_stop_on_unregister(wdog); + +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c +index c57c71b7d53d..ffe9bd843922 100644 +--- a/drivers/xen/pvcalls-back.c ++++ b/drivers/xen/pvcalls-back.c +@@ -1087,7 +1087,8 @@ static void set_backend_state(struct xenbus_device *dev, + case XenbusStateInitialised: + switch (state) { + case XenbusStateConnected: +- backend_connect(dev); ++ if (backend_connect(dev)) ++ return; + xenbus_switch_state(dev, XenbusStateConnected); + break; + case XenbusStateClosing: +diff --git a/fs/aio.c b/fs/aio.c +index 4115d5ad6b90..47bb7b5685ba 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -176,6 +176,7 @@ struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; ++ struct cred *creds; + }; + + struct poll_iocb { +@@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, + static void aio_fsync_work(struct work_struct *work) + { + struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); ++ const struct cred *old_cred = override_creds(iocb->fsync.creds); + + iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); ++ revert_creds(old_cred); ++ put_cred(iocb->fsync.creds); + iocb_put(iocb); + } + +@@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, + if (unlikely(!req->file->f_op->fsync)) + return -EINVAL; + ++ req->creds = prepare_creds(); ++ if (!req->creds) ++ return -ENOMEM; ++ + req->datasync = datasync; + INIT_WORK(&req->work, aio_fsync_work); + schedule_work(&req->work); +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index c8f304cae3f3..06b1a86d76b1 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2747,7 +2747,9 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, + * response size smaller. 
+ */ + req->MaxOutputResponse = cpu_to_le32(max_response_size); +- ++ req->sync_hdr.CreditCharge = ++ cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), ++ SMB2_MAX_BUFFER_SIZE)); + if (is_fsctl) + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); + else +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index d40cbad16659..3d5ad11aacc5 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, + goto out; + } + ++ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { ++ if (!silent) ++ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); ++ goto out; ++ } ++ + error = 0; + + out: +diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c +index 8303b44a5068..d2ed4dc4434c 100644 +--- a/fs/gfs2/lops.c ++++ b/fs/gfs2/lops.c +@@ -504,12 +504,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, + unsigned int bsize = sdp->sd_sb.sb_bsize, off; + unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; + unsigned int shift = PAGE_SHIFT - bsize_shift; +- unsigned int max_bio_size = 2 * 1024 * 1024; ++ unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; + struct gfs2_journal_extent *je; + int sz, ret = 0; + struct bio *bio = NULL; + struct page *page = NULL; +- bool bio_chained = false, done = false; ++ bool done = false; + errseq_t since; + + memset(head, 0, sizeof(*head)); +@@ -532,10 +532,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, + off = 0; + } + +- if (!bio || (bio_chained && !off) || +- bio->bi_iter.bi_size >= max_bio_size) { +- /* start new bio */ +- } else { ++ if (bio && (off || block < blocks_submitted + max_blocks)) { + sector_t sector = dblock << sdp->sd_fsb2bb_shift; + + if (bio_end_sector(bio) == sector) { +@@ -548,19 +545,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, + (PAGE_SIZE - off) >> bsize_shift; + + bio = gfs2_chain_bio(bio, blocks); +- bio_chained = true; + goto add_block_to_new_bio; + } + } + + if (bio) { +- blocks_submitted = block + 1; ++ blocks_submitted = block; + submit_bio(bio); + } + + bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); + bio->bi_opf = REQ_OP_READ; +- bio_chained = false; + add_block_to_new_bio: + sz = bio_add_page(bio, page, bsize, off); + BUG_ON(sz != bsize); +@@ -568,7 +563,7 @@ block_added: + off += bsize; + if (off == PAGE_SIZE) + page = NULL; +- if (blocks_submitted < 2 * max_bio_size >> bsize_shift) { ++ if (blocks_submitted <= blocks_read + max_blocks) { + /* Keep at least one bio in flight */ + continue; + } +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 2050100e6e84..7fa3cd3fff4d 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -3498,8 +3498,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, + + ret = 0; + if (!pages || nr_pages > got_pages) { +- kfree(vmas); +- kfree(pages); ++ kvfree(vmas); ++ kvfree(pages); + pages = kvmalloc_array(nr_pages, sizeof(struct page *), + GFP_KERNEL); + vmas = kvmalloc_array(nr_pages, +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 445eef41bfaf..91b58c897f92 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) + if (!nilfs->ns_writer) + return -ENOMEM; + ++ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); ++ + err = nilfs_segctor_start_thread(nilfs->ns_writer); + if (err) { + kfree(nilfs->ns_writer); +diff --git a/fs/notify/fanotify/fanotify.c 
b/fs/notify/fanotify/fanotify.c +index deb13f0a0f7d..d24548ed31b9 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -171,6 +171,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, + if (!fsnotify_iter_should_report_type(iter_info, type)) + continue; + mark = iter_info->marks[type]; ++ ++ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */ ++ marks_ignored_mask |= mark->ignored_mask; ++ + /* + * If the event is on dir and this mark doesn't care about + * events on dir, don't send it! +@@ -188,7 +192,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, + continue; + + marks_mask |= mark->mask; +- marks_ignored_mask |= mark->ignored_mask; + } + + test_mask = event_mask & marks_mask & ~marks_ignored_mask; +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index b801c6353100..ec5eca5a96f4 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -40,7 +40,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + { + ssize_t list_size, size, value_size = 0; + char *buf, *name, *value = NULL; +- int uninitialized_var(error); ++ int error = 0; + size_t slen; + + if (!(old->d_inode->i_opflags & IOP_XATTR) || +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index dbe43a50caf2..3f0c89001fcf 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -448,7 +448,7 @@ const struct inode_operations proc_link_inode_operations = { + + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { +- struct inode *inode = new_inode_pseudo(sb); ++ struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_ino = de->low_ino; +diff --git a/fs/proc/self.c b/fs/proc/self.c +index 57c0a1047250..32af065397f8 100644 +--- a/fs/proc/self.c ++++ b/fs/proc/self.c +@@ -43,7 +43,7 @@ int proc_setup_self(struct super_block *s) + inode_lock(root_inode); + self = d_alloc_name(s->s_root, "self"); + if (self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c +index f61ae53533f5..fac9e50b33a6 100644 +--- a/fs/proc/thread_self.c ++++ b/fs/proc/thread_self.c +@@ -43,7 +43,7 @@ int proc_setup_thread_self(struct super_block *s) + inode_lock(root_inode); + thread_self = d_alloc_name(s->s_root, "thread-self"); + if (thread_self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = thread_self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index dae64600ccbf..b6d7347ccda7 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -496,10 +496,12 @@ + __start___modver = .; \ + KEEP(*(__modver)) \ + __stop___modver = .; \ +- . = ALIGN((align)); \ +- __end_rodata = .; \ + } \ +- . = ALIGN((align)); ++ \ ++ BTF \ ++ \ ++ . = ALIGN((align)); \ ++ __end_rodata = .; + + /* RODATA & RO_DATA provided for backward compatibility. 
+ * All archs are supposed to use RO_DATA() */ +@@ -588,6 +590,20 @@ + __stop___ex_table = .; \ + } + ++/* ++ * .BTF ++ */ ++#ifdef CONFIG_DEBUG_INFO_BTF ++#define BTF \ ++ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ ++ __start_BTF = .; \ ++ *(.BTF) \ ++ __stop_BTF = .; \ ++ } ++#else ++#define BTF ++#endif ++ + /* + * Init task + */ +diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h +index f236f5b931b2..7fdd7f355b52 100644 +--- a/include/linux/elfnote.h ++++ b/include/linux/elfnote.h +@@ -54,7 +54,7 @@ + .popsection ; + + #define ELFNOTE(name, type, desc) \ +- ELFNOTE_START(name, type, "") \ ++ ELFNOTE_START(name, type, "a") \ + desc ; \ + ELFNOTE_END + +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 678b0a5797a0..21aa6d736e99 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -1376,8 +1376,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, + } + #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ + +-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, +- unsigned long start, unsigned long end, bool blockable); ++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, ++ unsigned long start, unsigned long end); + + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE + int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 53bad834adf5..3285dae06c03 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -694,6 +694,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) + } + + extern void kvfree(const void *addr); ++extern void kvfree_sensitive(const void *addr, size_t len); + + /* + * Mapcount of compound page as a whole, does not include mapped sub-pages. +diff --git a/include/linux/padata.h b/include/linux/padata.h +index cccab7a59787..fa35dcfbd13f 100644 +--- a/include/linux/padata.h ++++ b/include/linux/padata.h +@@ -145,7 +145,8 @@ struct padata_shell { + /** + * struct padata_instance - The overall control structure. + * +- * @cpu_notifier: cpu hotplug notifier. ++ * @cpu_online_node: Linkage for CPU online callback. ++ * @cpu_dead_node: Linkage for CPU offline callback. + * @parallel_wq: The workqueue used for parallel work. + * @serial_wq: The workqueue used for serial work. + * @pslist: List of padata_shell objects attached to this instance. +@@ -160,7 +161,8 @@ struct padata_shell { + * @flags: padata flags. 
+ */ + struct padata_instance { +- struct hlist_node node; ++ struct hlist_node cpu_online_node; ++ struct hlist_node cpu_dead_node; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; + struct list_head pslist; +diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h +index 86281ac7c305..860e0f843c12 100644 +--- a/include/linux/set_memory.h ++++ b/include/linux/set_memory.h +@@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) + #endif + + #ifndef set_mce_nospec +-static inline int set_mce_nospec(unsigned long pfn) ++static inline int set_mce_nospec(unsigned long pfn, bool unmap) + { + return 0; + } +diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h +index d0019d3395cf..59802eb8d2cc 100644 +--- a/include/net/inet_hashtables.h ++++ b/include/net/inet_hashtables.h +@@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp( + + int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); + ++static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h) ++{ ++ kfree(h->lhash2); ++ h->lhash2 = NULL; ++} ++ + static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) + { + kvfree(hashinfo->ehash_locks); +diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c +index 7ae5dddd1fe6..3b495773de5a 100644 +--- a/kernel/bpf/sysfs_btf.c ++++ b/kernel/bpf/sysfs_btf.c +@@ -9,15 +9,15 @@ + #include + + /* See scripts/link-vmlinux.sh, gen_btf() func for details */ +-extern char __weak _binary__btf_vmlinux_bin_start[]; +-extern char __weak _binary__btf_vmlinux_bin_end[]; ++extern char __weak __start_BTF[]; ++extern char __weak __stop_BTF[]; + + static ssize_t + btf_vmlinux_read(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t len) + { +- memcpy(buf, _binary__btf_vmlinux_bin_start + off, len); ++ memcpy(buf, __start_BTF + off, len); + return len; + } + +@@ -30,15 +30,14 @@ static struct kobject *btf_kobj; + + static int __init btf_vmlinux_init(void) + { +- if (!_binary__btf_vmlinux_bin_start) ++ if (!__start_BTF) + return 0; + + btf_kobj = kobject_create_and_add("btf", kernel_kobj); + if (!btf_kobj) + return -ENOMEM; + +- bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end - +- _binary__btf_vmlinux_bin_start; ++ bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF; + + return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux); + } +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 7382fc95d41e..aaaf50b25cc9 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -93,11 +93,11 @@ static void remote_function(void *data) + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might +- * be on the current CPU, which just calls the function directly ++ * be on the current CPU, which just calls the function directly. This will ++ * retry due to any failures in smp_call_function_single(), such as if the ++ * task_cpu() goes offline concurrently. 
+ * +- * returns: @func return value, or +- * -ESRCH - when the process isn't running +- * -EAGAIN - when the process moved away ++ * returns @func return value or -ESRCH when the process isn't running + */ + static int + task_function_call(struct task_struct *p, remote_function_f func, void *info) +@@ -110,11 +110,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) + }; + int ret; + +- do { +- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); +- if (!ret) +- ret = data.ret; +- } while (ret == -EAGAIN); ++ for (;;) { ++ ret = smp_call_function_single(task_cpu(p), remote_function, ++ &data, 1); ++ ret = !ret ? data.ret : -EAGAIN; ++ ++ if (ret != -EAGAIN) ++ break; ++ ++ cond_resched(); ++ } + + return ret; + } +diff --git a/kernel/padata.c b/kernel/padata.c +index c4b774331e46..92a4867e8adc 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -782,7 +782,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) + struct padata_instance *pinst; + int ret; + +- pinst = hlist_entry_safe(node, struct padata_instance, node); ++ pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); + if (!pinst_has_cpu(pinst, cpu)) + return 0; + +@@ -797,7 +797,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) + struct padata_instance *pinst; + int ret; + +- pinst = hlist_entry_safe(node, struct padata_instance, node); ++ pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); + if (!pinst_has_cpu(pinst, cpu)) + return 0; + +@@ -813,8 +813,9 @@ static enum cpuhp_state hp_online; + static void __padata_free(struct padata_instance *pinst) + { + #ifdef CONFIG_HOTPLUG_CPU +- cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node); +- cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); ++ cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, ++ &pinst->cpu_dead_node); ++ cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); + #endif + + WARN_ON(!list_empty(&pinst->pslist)); +@@ -1020,9 +1021,10 @@ static struct padata_instance *padata_alloc(const char *name, + mutex_init(&pinst->lock); + + #ifdef CONFIG_HOTPLUG_CPU +- cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); ++ cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, ++ &pinst->cpu_online_node); + cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, +- &pinst->node); ++ &pinst->cpu_dead_node); + #endif + + put_online_cpus(); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 193b6ab74d7f..8a0e6bdba50d 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -2678,7 +2678,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr) + /* + * We don't care about NUMA placement if we don't have memory. + */ +- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) ++ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) + return; + + /* +diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c +index 717c940112f9..8ad5ba2b86e2 100644 +--- a/lib/lzo/lzo1x_compress.c ++++ b/lib/lzo/lzo1x_compress.c +@@ -268,6 +268,19 @@ m_len_done: + *op++ = (M4_MARKER | ((m_off >> 11) & 8) + | (m_len - 2)); + else { ++ if (unlikely(((m_off & 0x403f) == 0x403f) ++ && (m_len >= 261) ++ && (m_len <= 264)) ++ && likely(bitstream_version)) { ++ // Under lzo-rle, block copies ++ // for 261 <= length <= 264 and ++ // (distance & 0x80f3) == 0x80f3 ++ // can result in ambiguous ++ // output. 
Adjust length ++ // to 260 to prevent ambiguity. ++ ip -= m_len - 260; ++ m_len = 260; ++ } + m_len -= M4_MAX_LEN; + *op++ = (M4_MARKER | ((m_off >> 11) & 8)); + while (unlikely(m_len > 255)) { +diff --git a/mm/gup.c b/mm/gup.c +index 745b4036cdfd..4a8e969a6e59 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -161,13 +161,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, + } + + /* +- * FOLL_FORCE can write to even unwritable pte's, but only +- * after we've gone through a COW cycle and they are dirty. ++ * FOLL_FORCE or a forced COW break can write even to unwritable pte's, ++ * but only after we've gone through a COW cycle and they are dirty. + */ + static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) + { +- return pte_write(pte) || +- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); ++ return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); ++} ++ ++/* ++ * A (separate) COW fault might break the page the other way and ++ * get_user_pages() would return the page from what is now the wrong ++ * VM. So we need to force a COW break at GUP time even for reads. ++ */ ++static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) ++{ ++ return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); + } + + static struct page *follow_page_pte(struct vm_area_struct *vma, +@@ -823,12 +832,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + goto out; + } + if (is_vm_hugetlb_page(vma)) { ++ if (should_force_cow_break(vma, foll_flags)) ++ foll_flags |= FOLL_WRITE; + i = follow_hugetlb_page(mm, vma, pages, vmas, + &start, &nr_pages, i, +- gup_flags, nonblocking); ++ foll_flags, nonblocking); + continue; + } + } ++ ++ if (should_force_cow_break(vma, foll_flags)) ++ foll_flags |= FOLL_WRITE; ++ + retry: + /* + * If we have a pending SIGKILL, don't keep faulting pages and +@@ -2316,6 +2331,10 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end) + * + * If the architecture does not support this function, simply return with no + * pages pinned. ++ * ++ * Careful, careful! COW breaking can go either way, so a non-write ++ * access can get ambiguous page results. If you call this function without ++ * 'write' set, you'd better be sure that you're ok with that ambiguity. + */ + int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +@@ -2343,6 +2362,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + * + * We do not adopt an rcu_read_lock(.) here as we also want to + * block IPIs that come from THPs splitting. ++ * ++ * NOTE! We allow read-only gup_fast() here, but you'd better be ++ * careful about possible COW pages. You'll get _a_ COW page, but ++ * not necessarily the one you intended to get depending on what ++ * COW event happens after this. COW may break the page copy in a ++ * random direction. + */ + + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && +@@ -2415,10 +2440,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages, + if (unlikely(!access_ok((void __user *)start, len))) + return -EFAULT; + ++ /* ++ * The FAST_GUP case requires FOLL_WRITE even for pure reads, ++ * because get_user_pages() may need to cause an early COW in ++ * order to avoid confusing the normal COW routines. So only ++ * targets that are already writable are safe to do by just ++ * looking at the page tables. 
++ */ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && + gup_fast_permitted(start, end)) { + local_irq_disable(); +- gup_pgd_range(addr, end, gup_flags, pages, &nr); ++ gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr); + local_irq_enable(); + ret = nr; + } +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 0d96831b6ded..7ec5710afc99 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1454,13 +1454,12 @@ out_unlock: + } + + /* +- * FOLL_FORCE can write to even unwritable pmd's, but only +- * after we've gone through a COW cycle and they are dirty. ++ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, ++ * but only after we've gone through a COW cycle and they are dirty. + */ + static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) + { +- return pmd_write(pmd) || +- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); ++ return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); + } + + struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, +diff --git a/mm/slub.c b/mm/slub.c +index af44807d5b05..fca33abd6c42 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5776,8 +5776,10 @@ static int sysfs_slab_add(struct kmem_cache *s) + + s->kobj.kset = kset; + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); +- if (err) ++ if (err) { ++ kobject_put(&s->kobj); + goto out; ++ } + + err = sysfs_create_group(&s->kobj, &slab_attr_group); + if (err) +diff --git a/mm/util.c b/mm/util.c +index 3ad6db9a722e..ab358c64bbd3 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -594,6 +594,24 @@ void kvfree(const void *addr) + } + EXPORT_SYMBOL(kvfree); + ++/** ++ * kvfree_sensitive - Free a data object containing sensitive information. ++ * @addr: address of the data object to be freed. ++ * @len: length of the data object. ++ * ++ * Use the special memzero_explicit() function to clear the content of a ++ * kvmalloc'ed object containing sensitive data to make sure that the ++ * compiler won't optimize out the data clearing. 
++ */ ++void kvfree_sensitive(const void *addr, size_t len) ++{ ++ if (likely(!ZERO_OR_NULL_PTR(addr))) { ++ memzero_explicit((void *)addr, len); ++ kvfree(addr); ++ } ++} ++EXPORT_SYMBOL(kvfree_sensitive); ++ + static inline void *__page_rmapping(struct page *page) + { + unsigned long mapping; +diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c +index 37908561a64b..b18cdf03edb3 100644 +--- a/net/bridge/br_arp_nd_proxy.c ++++ b/net/bridge/br_arp_nd_proxy.c +@@ -276,6 +276,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, + ns_olen = request->len - (skb_network_offset(request) + + sizeof(struct ipv6hdr)) - sizeof(*ns); + for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) { ++ if (!ns->opt[i + 1]) { ++ kfree_skb(reply); ++ return; ++ } + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 5bad08dc4316..cb61a9d281f6 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -1139,14 +1139,14 @@ static int __init dccp_init(void) + inet_hashinfo_init(&dccp_hashinfo); + rc = inet_hashinfo2_init_mod(&dccp_hashinfo); + if (rc) +- goto out_fail; ++ goto out_free_percpu; + rc = -ENOBUFS; + dccp_hashinfo.bind_bucket_cachep = + kmem_cache_create("dccp_bind_bucket", + sizeof(struct inet_bind_bucket), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!dccp_hashinfo.bind_bucket_cachep) +- goto out_free_percpu; ++ goto out_free_hashinfo2; + + /* + * Size and allocate the main established and bind bucket +@@ -1242,6 +1242,8 @@ out_free_dccp_ehash: + free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); + out_free_bind_bucket_cachep: + kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); ++out_free_hashinfo2: ++ inet_hashinfo2_free_mod(&dccp_hashinfo); + out_free_percpu: + percpu_counter_destroy(&dccp_orphan_count); + out_fail: +@@ -1265,6 +1267,7 @@ static void __exit dccp_fini(void) + kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); + dccp_ackvec_exit(); + dccp_sysctl_exit(); ++ inet_hashinfo2_free_mod(&dccp_hashinfo); + percpu_counter_destroy(&dccp_orphan_count); + } + +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index f12fa8da6127..1b851fd82613 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -2455,6 +2455,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) + " %zd bytes, size of tnode: %zd bytes.\n", + LEAF_SIZE, TNODE_SIZE(0)); + ++ rcu_read_lock(); + for (h = 0; h < FIB_TABLE_HASHSZ; h++) { + struct hlist_head *head = &net->ipv4.fib_table_hash[h]; + struct fib_table *tb; +@@ -2474,7 +2475,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) + trie_show_usage(seq, t->stats); + #endif + } ++ cond_resched_rcu(); + } ++ rcu_read_unlock(); + + return 0; + } +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 18d05403d3b5..5af97b4f5df3 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -183,14 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + retv = -EBUSY; + break; + } +- } +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_prot != &tcpv6_prot) { +- retv = -EBUSY; ++ } else if (sk->sk_protocol == IPPROTO_TCP) { ++ if (sk->sk_prot != &tcpv6_prot) { ++ retv = -EBUSY; ++ break; ++ } ++ } else { + break; + } +- if (sk->sk_protocol != IPPROTO_TCP) +- break; ++ + if (sk->sk_state != TCP_ESTABLISHED) { + retv = -ENOTCONN; + break; +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index bc734cfaa29e..c87af430107a 
100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + { + struct sctp_association *asoc = t->asoc; + struct dst_entry *dst = NULL; +- struct flowi6 *fl6 = &fl->u.ip6; ++ struct flowi _fl; ++ struct flowi6 *fl6 = &_fl.u.ip6; + struct sctp_bind_addr *bp; + struct ipv6_pinfo *np = inet6_sk(sk); + struct sctp_sockaddr_entry *laddr; +@@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + enum sctp_scope scope; + __u8 matchlen = 0; + +- memset(fl6, 0, sizeof(struct flowi6)); ++ memset(&_fl, 0, sizeof(_fl)); + fl6->daddr = daddr->v6.sin6_addr; + fl6->fl6_dport = daddr->v6.sin6_port; + fl6->flowi6_proto = IPPROTO_SCTP; +@@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + rcu_read_unlock(); + + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); +- if (!asoc || saddr) ++ if (!asoc || saddr) { ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); + goto out; ++ } + + bp = &asoc->base.bind_addr; + scope = sctp_scope(daddr); +@@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + if ((laddr->a.sa.sa_family == AF_INET6) && + (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { + rcu_read_unlock(); ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); + goto out; + } + } +@@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + if (!IS_ERR_OR_NULL(dst)) + dst_release(dst); + dst = bdst; ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); + break; + } + +@@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + dst_release(dst); + dst = bdst; + matchlen = bmatchlen; ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); + } + rcu_read_unlock(); + +@@ -359,14 +369,12 @@ out: + struct rt6_info *rt; + + rt = (struct rt6_info *)dst; +- t->dst = dst; + t->dst_cookie = rt6_get_cookie(rt); + pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n", + &rt->rt6i_dst.addr, rt->rt6i_dst.plen, +- &fl6->saddr); ++ &fl->u.ip6.saddr); + } else { + t->dst = NULL; +- + pr_debug("no route\n"); + } + } +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c +index 681ffb3545db..237c88eeb538 100644 +--- a/net/sctp/protocol.c ++++ b/net/sctp/protocol.c +@@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + { + struct sctp_association *asoc = t->asoc; + struct rtable *rt; +- struct flowi4 *fl4 = &fl->u.ip4; ++ struct flowi _fl; ++ struct flowi4 *fl4 = &_fl.u.ip4; + struct sctp_bind_addr *bp; + struct sctp_sockaddr_entry *laddr; + struct dst_entry *dst = NULL; +@@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + + if (t->dscp & SCTP_DSCP_SET_MASK) + tos = t->dscp & SCTP_DSCP_VAL_MASK; +- memset(fl4, 0x0, sizeof(struct flowi4)); ++ memset(&_fl, 0x0, sizeof(_fl)); + fl4->daddr = daddr->v4.sin_addr.s_addr; + fl4->fl4_dport = daddr->v4.sin_port; + fl4->flowi4_proto = IPPROTO_SCTP; +@@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + &fl4->saddr); + + rt = ip_route_output_key(sock_net(sk), fl4); +- if (!IS_ERR(rt)) ++ if (!IS_ERR(rt)) { + dst = &rt->dst; ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); ++ } + + /* If there is no association or if a source address is passed, no + * more validation is required. 
+@@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, + false); + if (!odev || odev->ifindex != fl4->flowi4_oif) { +- if (!dst) ++ if (!dst) { + dst = &rt->dst; +- else ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); ++ } else { + dst_release(&rt->dst); ++ } + continue; + } + + dst_release(dst); + dst = &rt->dst; ++ t->dst = dst; ++ memcpy(fl, &_fl, sizeof(_fl)); + break; + } + + out_unlock: + rcu_read_unlock(); + out: +- t->dst = dst; +- if (dst) ++ if (dst) { + pr_debug("rt_dst:%pI4, rt_src:%pI4\n", +- &fl4->daddr, &fl4->saddr); +- else ++ &fl->u.ip4.daddr, &fl->u.ip4.saddr); ++ } else { ++ t->dst = NULL; + pr_debug("no route\n"); ++ } + } + + /* For v4, the source address is cached in the route entry(dst). So no need +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index ffd3262b7a41..58fe6556cdf5 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk) + skb_orphan(chunk->skb); + } + ++#define traverse_and_process() \ ++do { \ ++ msg = chunk->msg; \ ++ if (msg == prev_msg) \ ++ continue; \ ++ list_for_each_entry(c, &msg->chunks, frag_list) { \ ++ if ((clear && asoc->base.sk == c->skb->sk) || \ ++ (!clear && asoc->base.sk != c->skb->sk)) \ ++ cb(c); \ ++ } \ ++ prev_msg = msg; \ ++} while (0) ++ + static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, ++ bool clear, + void (*cb)(struct sctp_chunk *)) + + { ++ struct sctp_datamsg *msg, *prev_msg = NULL; + struct sctp_outq *q = &asoc->outqueue; ++ struct sctp_chunk *chunk, *c; + struct sctp_transport *t; +- struct sctp_chunk *chunk; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) + list_for_each_entry(chunk, &t->transmitted, transmitted_list) +- cb(chunk); ++ traverse_and_process(); + + list_for_each_entry(chunk, &q->retransmit, transmitted_list) +- cb(chunk); ++ traverse_and_process(); + + list_for_each_entry(chunk, &q->sacked, transmitted_list) +- cb(chunk); ++ traverse_and_process(); + + list_for_each_entry(chunk, &q->abandoned, transmitted_list) +- cb(chunk); ++ traverse_and_process(); + + list_for_each_entry(chunk, &q->out_chunk_list, list) +- cb(chunk); ++ traverse_and_process(); + } + + static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk, +@@ -9461,9 +9476,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, + * paths won't try to lock it and then oldsk. + */ + lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); +- sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); ++ sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w); + sctp_assoc_migrate(assoc, newsk); +- sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); ++ sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w); + + /* If the association on the newsk is already closed before accept() + * is called, set RCV_SHUTDOWN flag. +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh +index aa1386079f0c..8b6325c2dfc5 100755 +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -113,9 +113,6 @@ vmlinux_link() + gen_btf() + { + local pahole_ver +- local bin_arch +- local bin_format +- local bin_file + + if ! 
[ -x "$(command -v ${PAHOLE})" ]; then + echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" +@@ -133,17 +130,16 @@ gen_btf() + info "BTF" ${2} + LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} + +- # dump .BTF section into raw binary file to link with final vmlinux +- bin_arch=$(LANG=C ${OBJDUMP} -f ${1} | grep architecture | \ +- cut -d, -f1 | cut -d' ' -f2) +- bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ +- awk '{print $4}') +- bin_file=.btf.vmlinux.bin +- ${OBJCOPY} --change-section-address .BTF=0 \ +- --set-section-flags .BTF=alloc -O binary \ +- --only-section=.BTF ${1} $bin_file +- ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ +- --rename-section .data=.BTF $bin_file ${2} ++ # Create ${2} which contains just .BTF section but no symbols. Add ++ # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all ++ # deletes all symbols including __start_BTF and __stop_BTF, which will ++ # be redefined in the linker script. Add 2>/dev/null to suppress GNU ++ # objcopy warnings: "empty loadable segment detected at ..." ++ ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \ ++ --strip-all ${1} ${2} 2>/dev/null ++ # Change e_type to ET_REL so that it can be used to link final vmlinux. ++ # Unlike GNU ld, lld does not allow an ET_EXEC input. ++ printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none + } + + # Create ${2} .o file with all symbols from the ${1} object file +diff --git a/security/keys/internal.h b/security/keys/internal.h +index 7e9914943616..1ca8bfaed0e8 100644 +--- a/security/keys/internal.h ++++ b/security/keys/internal.h +@@ -350,15 +350,4 @@ static inline void key_check(const struct key *key) + #define key_check(key) do {} while(0) + + #endif +- +-/* +- * Helper function to clear and free a kvmalloc'ed memory object. 
+- */ +-static inline void __kvzfree(const void *addr, size_t len) +-{ +- if (addr) { +- memset((void *)addr, 0, len); +- kvfree(addr); +- } +-} + #endif /* _INTERNAL_H */ +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 5e01192e222a..edde63a63007 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -142,10 +142,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, + + key_ref_put(keyring_ref); + error3: +- if (payload) { +- memzero_explicit(payload, plen); +- kvfree(payload); +- } ++ kvfree_sensitive(payload, plen); + error2: + kfree(description); + error: +@@ -360,7 +357,7 @@ long keyctl_update_key(key_serial_t id, + + key_ref_put(key_ref); + error2: +- __kvzfree(payload, plen); ++ kvfree_sensitive(payload, plen); + error: + return ret; + } +@@ -914,7 +911,7 @@ can_read_key: + */ + if (ret > key_data_len) { + if (unlikely(key_data)) +- __kvzfree(key_data, key_data_len); ++ kvfree_sensitive(key_data, key_data_len); + key_data_len = ret; + continue; /* Allocate buffer */ + } +@@ -923,7 +920,7 @@ can_read_key: + ret = -EFAULT; + break; + } +- __kvzfree(key_data, key_data_len); ++ kvfree_sensitive(key_data, key_data_len); + + key_put_out: + key_put(key); +@@ -1225,10 +1222,7 @@ long keyctl_instantiate_key_common(key_serial_t id, + keyctl_change_reqkey_auth(NULL); + + error2: +- if (payload) { +- memzero_explicit(payload, plen); +- kvfree(payload); +- } ++ kvfree_sensitive(payload, plen); + error: + return ret; + } +diff --git a/security/smack/smack.h b/security/smack/smack.h +index 62529f382942..335d2411abe4 100644 +--- a/security/smack/smack.h ++++ b/security/smack/smack.h +@@ -148,7 +148,6 @@ struct smk_net4addr { + struct smack_known *smk_label; /* label */ + }; + +-#if IS_ENABLED(CONFIG_IPV6) + /* + * An entry in the table identifying IPv6 hosts. + */ +@@ -159,9 +158,7 @@ struct smk_net6addr { + int smk_masks; /* mask size */ + struct smack_known *smk_label; /* label */ + }; +-#endif /* CONFIG_IPV6 */ + +-#ifdef SMACK_IPV6_PORT_LABELING + /* + * An entry in the table identifying ports. 
+ */ +@@ -174,7 +171,6 @@ struct smk_port_label { + short smk_sock_type; /* Socket type */ + short smk_can_reuse; + }; +-#endif /* SMACK_IPV6_PORT_LABELING */ + + struct smack_known_list_elem { + struct list_head list; +@@ -335,9 +331,7 @@ extern struct smack_known smack_known_web; + extern struct mutex smack_known_lock; + extern struct list_head smack_known_list; + extern struct list_head smk_net4addr_list; +-#if IS_ENABLED(CONFIG_IPV6) + extern struct list_head smk_net6addr_list; +-#endif /* CONFIG_IPV6 */ + + extern struct mutex smack_onlycap_lock; + extern struct list_head smack_onlycap_list; +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index ad22066eba04..12c0fa85d9f8 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -51,10 +51,8 @@ + #define SMK_RECEIVING 1 + #define SMK_SENDING 2 + +-#ifdef SMACK_IPV6_PORT_LABELING +-DEFINE_MUTEX(smack_ipv6_lock); ++static DEFINE_MUTEX(smack_ipv6_lock); + static LIST_HEAD(smk_ipv6_port_list); +-#endif + static struct kmem_cache *smack_inode_cache; + struct kmem_cache *smack_rule_cache; + int smack_enabled; +@@ -2326,7 +2324,6 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) + return NULL; + } + +-#if IS_ENABLED(CONFIG_IPV6) + /* + * smk_ipv6_localhost - Check for local ipv6 host address + * @sip: the address +@@ -2394,7 +2391,6 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) + + return NULL; + } +-#endif /* CONFIG_IPV6 */ + + /** + * smack_netlabel - Set the secattr on a socket +@@ -2483,7 +2479,6 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) + return smack_netlabel(sk, sk_lbl); + } + +-#if IS_ENABLED(CONFIG_IPV6) + /** + * smk_ipv6_check - check Smack access + * @subject: subject Smack label +@@ -2516,7 +2511,6 @@ static int smk_ipv6_check(struct smack_known *subject, + rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); + return rc; + } +-#endif /* CONFIG_IPV6 */ + + #ifdef SMACK_IPV6_PORT_LABELING + /** +@@ -2605,6 +2599,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) + mutex_unlock(&smack_ipv6_lock); + return; + } ++#endif + + /** + * smk_ipv6_port_check - check Smack port access +@@ -2667,7 +2662,6 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, + + return smk_ipv6_check(skp, object, address, act); + } +-#endif /* SMACK_IPV6_PORT_LABELING */ + + /** + * smack_inode_setsecurity - set smack xattrs +@@ -2842,24 +2836,21 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, + return 0; + if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; +-#ifdef SMACK_IPV6_SECMARK_LABELING +- struct smack_known *rsp; +-#endif ++ struct smack_known *rsp = NULL; + + if (addrlen < SIN6_LEN_RFC2133) + return 0; +-#ifdef SMACK_IPV6_SECMARK_LABELING +- rsp = smack_ipv6host_label(sip); ++ if (__is_defined(SMACK_IPV6_SECMARK_LABELING)) ++ rsp = smack_ipv6host_label(sip); + if (rsp != NULL) { + struct socket_smack *ssp = sock->sk->sk_security; + + rc = smk_ipv6_check(ssp->smk_out, rsp, sip, + SMK_CONNECTING); + } +-#endif +-#ifdef SMACK_IPV6_PORT_LABELING +- rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); +-#endif ++ if (__is_defined(SMACK_IPV6_PORT_LABELING)) ++ rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); ++ + return rc; + } + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) +diff --git a/security/smack/smackfs.c 
b/security/smack/smackfs.c +index e3e05c04dbd1..c21b656b3263 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -878,11 +878,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + else + rule += strlen(skp->smk_known) + 1; + ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &maplevel); + if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + goto out; + + rule += SMK_DIGITLEN; ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &catlen); + if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) + goto out; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 5c74ea2bb44b..ec501fbaabe4 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -136,6 +136,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); + ++static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_group *group = &substream->self_group; ++ ++ if (substream->pcm->nonatomic) ++ mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING); ++ else ++ spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); ++} ++ + /** + * snd_pcm_stream_unlock_irq - Unlock the PCM stream + * @substream: PCM substream +@@ -1994,6 +2004,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) + } + pcm_file = f.file->private_data; + substream1 = pcm_file->substream; ++ ++ if (substream == substream1) { ++ res = -EINVAL; ++ goto _badf; ++ } ++ + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + res = -ENOMEM; +@@ -2022,7 +2038,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) + snd_pcm_stream_unlock_irq(substream); + + snd_pcm_group_lock_irq(target_group, nonatomic); +- snd_pcm_stream_lock(substream1); ++ snd_pcm_stream_lock_nested(substream1); + snd_pcm_group_assign(substream1, target_group); + refcount_inc(&target_group->refs); + snd_pcm_stream_unlock(substream1); +@@ -2038,7 +2054,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) + + static void relink_to_local(struct snd_pcm_substream *substream) + { +- snd_pcm_stream_lock(substream); ++ snd_pcm_stream_lock_nested(substream); + snd_pcm_group_assign(substream, &substream->self_group); + snd_pcm_stream_unlock(substream); + } +diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c +index 0e4c3a9ed5e4..76ae568489ef 100644 +--- a/sound/firewire/fireface/ff-protocol-latter.c ++++ b/sound/firewire/fireface/ff-protocol-latter.c +@@ -107,18 +107,18 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) + int err; + + // Set the number of data blocks transferred in a second. 
+- if (rate % 32000 == 0) +- code = 0x00; ++ if (rate % 48000 == 0) ++ code = 0x04; + else if (rate % 44100 == 0) + code = 0x02; +- else if (rate % 48000 == 0) +- code = 0x04; ++ else if (rate % 32000 == 0) ++ code = 0x00; + else + return -EINVAL; + + if (rate >= 64000 && rate < 128000) + code |= 0x08; +- else if (rate >= 128000 && rate < 192000) ++ else if (rate >= 128000) + code |= 0x10; + + reg = cpu_to_le32(code); +@@ -140,7 +140,7 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) + if (curr_rate == rate) + break; + } +- if (count == 10) ++ if (count > 10) + return -ETIMEDOUT; + + for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); ++i) { +diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c +index 9be89377171b..b4e9b0de3b42 100644 +--- a/sound/isa/es1688/es1688.c ++++ b/sound/isa/es1688/es1688.c +@@ -267,8 +267,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, + return error; + } + error = snd_es1688_probe(card, dev); +- if (error < 0) ++ if (error < 0) { ++ snd_card_free(card); + return error; ++ } + pnp_set_card_drvdata(pcard, card); + snd_es968_pnp_is_probed = 1; + return 0; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index da4d21445e80..df5afac0b600 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -8156,6 +8156,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, + {0x17, 0x90170110}), ++ SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, ++ {0x14, 0x01014010}, ++ {0x17, 0x90170120}, ++ {0x18, 0x02a11030}, ++ {0x19, 0x02a1103f}, ++ {0x21, 0x0221101f}), + {} + }; + +diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c +index 8600c5439e1e..2e4aa23b5a60 100644 +--- a/sound/soc/codecs/max9867.c ++++ b/sound/soc/codecs/max9867.c +@@ -46,13 +46,13 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv, + + static const struct snd_kcontrol_new max9867_snd_controls[] = { + SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL, +- MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv), ++ MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv), + SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL, + MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv), + SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN, + MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv), + SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN, +- MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv), ++ MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv), + SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1), + SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1, + max9867_dac_tlv), +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 54f9ce38471e..f9a64e9526f5 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -810,9 +810,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + if (chip == (void *)-1L) + return 0; + +- chip->autosuspended = !!PMSG_IS_AUTO(message); +- if (!chip->autosuspended) +- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + if (!chip->num_suspended_intf++) { + list_for_each_entry(as, &chip->pcm_list, list) { + snd_usb_pcm_suspend(as); +@@ -825,6 +822,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + snd_usb_mixer_suspend(mixer); + } + ++ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { ++ snd_power_change_state(chip->card, 
SNDRV_CTL_POWER_D3hot); ++ chip->system_suspend = chip->num_suspended_intf; ++ } ++ + return 0; + } + +@@ -838,10 +840,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + + if (chip == (void *)-1L) + return 0; +- if (--chip->num_suspended_intf) +- return 0; + + atomic_inc(&chip->active); /* avoid autopm */ ++ if (chip->num_suspended_intf > 1) ++ goto out; + + list_for_each_entry(as, &chip->pcm_list, list) { + err = snd_usb_pcm_resume(as); +@@ -863,9 +865,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + snd_usbmidi_resume(p); + } + +- if (!chip->autosuspended) ++ out: ++ if (chip->num_suspended_intf == chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); +- chip->autosuspended = 0; ++ chip->system_suspend = 0; ++ } ++ chip->num_suspended_intf--; + + err_out: + atomic_dec(&chip->active); /* allow autopm after this point */ +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index bbae11605a4c..042a5e8eb79d 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -25,6 +25,26 @@ + .idProduct = prod, \ + .bInterfaceClass = USB_CLASS_VENDOR_SPEC + ++/* HP Thunderbolt Dock Audio Headset */ ++{ ++ USB_DEVICE(0x03f0, 0x0269), ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { ++ .vendor_name = "HP", ++ .product_name = "Thunderbolt Dock Audio Headset", ++ .profile_name = "HP-Thunderbolt-Dock-Audio-Headset", ++ .ifnum = QUIRK_NO_INTERFACE ++ } ++}, ++/* HP Thunderbolt Dock Audio Module */ ++{ ++ USB_DEVICE(0x03f0, 0x0567), ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { ++ .vendor_name = "HP", ++ .product_name = "Thunderbolt Dock Audio Module", ++ .profile_name = "HP-Thunderbolt-Dock-Audio-Module", ++ .ifnum = QUIRK_NO_INTERFACE ++ } ++}, + /* FTDI devices */ + { + USB_DEVICE(0x0403, 0xb8d8), +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index e360680f45f3..55a2119c2411 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -26,7 +26,7 @@ struct snd_usb_audio { + struct usb_interface *pm_intf; + u32 usb_id; + struct mutex mutex; +- unsigned int autosuspended:1; ++ unsigned int system_suspend; + atomic_t active; + atomic_t shutdown; + atomic_t usage_count; +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index 91cab5f669d2..92b07be0b48b 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -1757,8 +1757,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) + fmt1_str = strtok_r(argv0_str, ":", &fmt); + fmt2_str = strtok_r(NULL, "/", &fmt); + fmt3_str = strtok_r(NULL, " \t", &fmt); +- if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL +- || fmt3_str == NULL) { ++ if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) { + semantic_error("Failed to parse event name: %s\n", argv[0]); + ret = -EINVAL; + goto out; +diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc +index 021c03fd885d..23465823532b 100644 +--- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc ++++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc +@@ -14,6 +14,8 @@ if [ ! 
-f set_event ]; then + exit_unsupported + fi + ++[ -f error_log ] || exit_unsupported ++ + ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' + + exit 0 +diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c +index 6dee9e636a95..422e7761254d 100644 +--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c ++++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c +@@ -115,6 +115,7 @@ static struct option long_options[] = { + { "tcp", no_argument, 0, 't' }, + { "udp", no_argument, 0, 'u' }, + { "ip", no_argument, 0, 'i' }, ++ { NULL, 0, NULL, 0 }, + }; + + static int next_port = 19999; +diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +index 0f89cd50a94b..152ffa45e857 100644 +--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json ++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +@@ -54,7 +54,7 @@ + "setup": [ + "$TC qdisc add dev $DEV2 ingress" + ], +- "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok", ++ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress handle 0xffffffff flower action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV2 ingress", + "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff", +@@ -99,9 +99,9 @@ + }, + "setup": [ + "$TC qdisc add dev $DEV2 ingress", +- "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" ++ "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" + ], +- "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", ++ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", + "expExitCode": "2", + "verifyCmd": "$TC -s filter show dev $DEV2 ingress", + "matchPattern": "filter protocol ip pref 1 flower chain 0 handle", +diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py +index 6a2bd2cf528e..995f66ce43eb 100755 +--- a/tools/testing/selftests/tc-testing/tdc_batch.py ++++ b/tools/testing/selftests/tc-testing/tdc_batch.py +@@ -72,21 +72,21 @@ mac_prefix = args.mac_prefix + + def format_add_filter(device, prio, handle, skip, src_mac, dst_mac, + share_action): +- return ("filter add dev {} {} protocol ip parent ffff: handle {} " ++ return ("filter add dev {} {} protocol ip ingress handle {} " + " flower {} src_mac {} dst_mac {} action drop {}".format( + device, prio, handle, skip, src_mac, dst_mac, share_action)) + + + def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac, + share_action): +- return ("filter replace dev {} {} protocol ip parent ffff: handle {} " ++ return ("filter replace dev {} {} protocol ip ingress handle {} " + " flower {} src_mac {} dst_mac {} action drop {}".format( + device, prio, handle, skip, src_mac, dst_mac, share_action)) + + + def 
format_del_filter(device, prio, handle, skip, src_mac, dst_mac, + share_action): +- return ("filter del dev {} {} protocol ip parent ffff: handle {} " ++ return ("filter del dev {} {} protocol ip ingress handle {} " + "flower".format(device, prio, handle)) + + +diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c +index 0a356aa91aa1..f2047fc69006 100644 +--- a/virt/kvm/arm/aarch32.c ++++ b/virt/kvm/arm/aarch32.c +@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { + [7] = { 4, 4 }, /* FIQ, unused */ + }; + ++static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) ++{ ++ preempt_disable(); ++ if (kvm_arm_vcpu_loaded(vcpu)) { ++ kvm_arch_vcpu_put(vcpu); ++ return true; ++ } ++ ++ preempt_enable(); ++ return false; ++} ++ ++static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) ++{ ++ if (loaded) { ++ kvm_arch_vcpu_load(vcpu, smp_processor_id()); ++ preempt_enable(); ++ } ++} ++ + /* + * When an exception is taken, most CPSR fields are left unchanged in the + * handler. However, some are explicitly overridden (e.g. M[4:0]). +@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) + + void kvm_inject_undef32(struct kvm_vcpu *vcpu) + { ++ bool loaded = pre_fault_synchronize(vcpu); ++ + prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); ++ post_fault_synchronize(vcpu, loaded); + } + + /* +@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, + u32 vect_offset; + u32 *far, *fsr; + bool is_lpae; ++ bool loaded; ++ ++ loaded = pre_fault_synchronize(vcpu); + + if (is_pabt) { + vect_offset = 12; +@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, + /* no need to shuffle FS[4] into DFSR[10] as its 0 */ + *fsr = DFSR_FSC_EXTABT_nLPAE; + } ++ ++ post_fault_synchronize(vcpu, loaded); + } + + void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c +index 86c6aa1cb58e..986fbc3cf667 100644 +--- a/virt/kvm/arm/arm.c ++++ b/virt/kvm/arm/arm.c +@@ -354,6 +354,16 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) + return kvm_vgic_vcpu_init(vcpu); + } + ++#ifdef CONFIG_ARM64 ++#define __ptrauth_save_key(regs, key) \ ++({ \ ++ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ ++ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ ++}) ++#else ++#define __ptrauth_save_key(regs, key) do { } while (0) ++#endif ++ + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + { + int *last_ran; +@@ -386,7 +396,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + else + vcpu_set_wfe_traps(vcpu); + +- vcpu_ptrauth_setup_lazy(vcpu); ++ if (vcpu_has_ptrauth(vcpu)) { ++ struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context; ++ ++ __ptrauth_save_key(ctxt->sys_regs, APIA); ++ __ptrauth_save_key(ctxt->sys_regs, APIB); ++ __ptrauth_save_key(ctxt->sys_regs, APDA); ++ __ptrauth_save_key(ctxt->sys_regs, APDB); ++ __ptrauth_save_key(ctxt->sys_regs, APGA); ++ ++ vcpu_ptrauth_disable(vcpu); ++ } + } + + void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 03c681568ab1..d5d4cd581af3 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -157,10 +157,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); + static unsigned long long kvm_createvm_count; + static unsigned long long kvm_active_vms; + +-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, +- unsigned long start, unsigned 
long end, bool blockable) ++__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, ++ unsigned long start, unsigned long end) + { +- return 0; + } + + bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) +@@ -381,6 +380,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) + return container_of(mn, struct kvm, mmu_notifier); + } + ++static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, ++ struct mm_struct *mm, ++ unsigned long start, unsigned long end) ++{ ++ struct kvm *kvm = mmu_notifier_to_kvm(mn); ++ int idx; ++ ++ idx = srcu_read_lock(&kvm->srcu); ++ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); ++ srcu_read_unlock(&kvm->srcu, idx); ++} ++ + static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, +@@ -405,7 +416,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, + { + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int need_tlb_flush = 0, idx; +- int ret; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); +@@ -422,14 +432,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, + kvm_flush_remote_tlbs(kvm); + + spin_unlock(&kvm->mmu_lock); +- +- ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, +- range->end, +- mmu_notifier_range_blockable(range)); +- + srcu_read_unlock(&kvm->srcu, idx); + +- return ret; ++ return 0; + } + + static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, +@@ -535,6 +540,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, + } + + static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { ++ .invalidate_range = kvm_mmu_notifier_invalidate_range, + .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, + .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, + .clear_flush_young = kvm_mmu_notifier_clear_flush_young, diff --git a/patch/kernel/odroidxu4-current/patch-5.4.47-48.patch b/patch/kernel/odroidxu4-current/patch-5.4.47-48.patch new file mode 100644 index 000000000..89086a00f --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.47-48.patch @@ -0,0 +1,9809 @@ +diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +index b6a7e7397b8b..b944fe067188 100644 +--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt ++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +@@ -16,6 +16,9 @@ Required properties: + Documentation/devicetree/bindings/graph.txt. This port should be connected + to the input port of an attached HDMI or LVDS encoder chip. + ++Optional properties: ++- pinctrl-names: Contain "default" and "sleep". ++ + Example: + + dpi0: dpi@1401d000 { +@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { + <&mmsys CLK_MM_DPI_ENGINE>, + <&apmixedsys CLK_APMIXED_TVDPLL>; + clock-names = "pixel", "engine", "pll"; ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&dpi_pin_func>; ++ pinctrl-1 = <&dpi_pin_idle>; + + port { + dpi0_out: endpoint { +diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt +index 4833904d32a5..a18e996fa54b 100644 +--- a/Documentation/virt/kvm/api.txt ++++ b/Documentation/virt/kvm/api.txt +@@ -4444,9 +4444,11 @@ EOI was received. 
+ #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/Makefile b/Makefile +index 1da2944b842e..fee4101b5d22 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 47 ++SUBLEVEL = 48 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -587,12 +587,8 @@ KBUILD_MODULES := + KBUILD_BUILTIN := 1 + + # If we have only "make modules", don't compile built-in objects. +-# When we're building modules with modversions, we need to consider +-# the built-in objects during the descend as well, in order to +-# make sure the checksums are up to date before we record them. +- + ifeq ($(MAKECMDGOALS),modules) +- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) ++ KBUILD_BUILTIN := + endif + + # If we have "make modules", compile modules +@@ -1282,6 +1278,13 @@ ifdef CONFIG_MODULES + + all: modules + ++# When we're building modules with modversions, we need to consider ++# the built-in objects during the descend as well, in order to ++# make sure the checksums are up to date before we record them. ++ifdef CONFIG_MODVERSIONS ++ KBUILD_BUILTIN := 1 ++endif ++ + # Build modules + # + # A module can be listed more than once in obj-m resulting in +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h +index af2c0063dc75..b771bf1b5352 100644 +--- a/arch/alpha/include/asm/io.h ++++ b/arch/alpha/include/asm/io.h +@@ -322,14 +322,18 @@ static inline int __is_mmio(const volatile void __iomem *addr) + #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) + extern inline unsigned int ioread8(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + mb(); + return ret; + } + + extern inline unsigned int ioread16(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + mb(); + return ret; + } +@@ -370,7 +374,9 @@ extern inline void outw(u16 b, unsigned long port) + #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) + extern inline unsigned int ioread32(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + mb(); + return ret; + } +@@ -415,14 +421,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr) + + extern inline u8 readb(const volatile void __iomem *addr) + { +- u8 ret = __raw_readb(addr); ++ u8 ret; ++ mb(); ++ ret = __raw_readb(addr); + mb(); + return ret; + } + + extern inline u16 readw(const volatile void __iomem *addr) + { +- u16 ret = __raw_readw(addr); ++ u16 ret; ++ mb(); ++ ret = __raw_readw(addr); + mb(); + return ret; + } +@@ -463,14 +473,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) + + extern inline u32 readl(const volatile void __iomem *addr) + { +- u32 ret = __raw_readl(addr); ++ u32 ret; ++ mb(); ++ ret = __raw_readl(addr); + mb(); + return ret; + } + + extern inline u64 readq(const volatile void __iomem *addr) + { +- u64 ret = __raw_readq(addr); ++ u64 ret; ++ mb(); ++ ret = __raw_readq(addr); + mb(); + return ret; + } +@@ -499,14 +513,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) + #define outb_p outb + #define outw_p outw + #define outl_p outl +-#define readb_relaxed(addr) 
__raw_readb(addr) +-#define readw_relaxed(addr) __raw_readw(addr) +-#define readl_relaxed(addr) __raw_readl(addr) +-#define readq_relaxed(addr) __raw_readq(addr) +-#define writeb_relaxed(b, addr) __raw_writeb(b, addr) +-#define writew_relaxed(b, addr) __raw_writew(b, addr) +-#define writel_relaxed(b, addr) __raw_writel(b, addr) +-#define writeq_relaxed(b, addr) __raw_writeq(b, addr) ++ ++extern u8 readb_relaxed(const volatile void __iomem *addr); ++extern u16 readw_relaxed(const volatile void __iomem *addr); ++extern u32 readl_relaxed(const volatile void __iomem *addr); ++extern u64 readq_relaxed(const volatile void __iomem *addr); ++ ++#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) ++extern inline u8 readb_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readb(addr); ++} ++ ++extern inline u16 readw_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readw(addr); ++} ++#endif ++ ++#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) ++extern inline u32 readl_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readl(addr); ++} ++ ++extern inline u64 readq_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readq(addr); ++} ++#endif ++ ++#define writeb_relaxed writeb ++#define writew_relaxed writew ++#define writel_relaxed writel ++#define writeq_relaxed writeq + + /* + * String version of IO memory access ops: +diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c +index c025a3e5e357..938de13adfbf 100644 +--- a/arch/alpha/kernel/io.c ++++ b/arch/alpha/kernel/io.c +@@ -16,21 +16,27 @@ + unsigned int + ioread8(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + mb(); + return ret; + } + + unsigned int ioread16(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + mb(); + return ret; + } + + unsigned int ioread32(void __iomem *addr) + { +- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); ++ unsigned int ret; ++ mb(); ++ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + mb(); + return ret; + } +@@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq); + + u8 readb(const volatile void __iomem *addr) + { +- u8 ret = __raw_readb(addr); ++ u8 ret; ++ mb(); ++ ret = __raw_readb(addr); + mb(); + return ret; + } + + u16 readw(const volatile void __iomem *addr) + { +- u16 ret = __raw_readw(addr); ++ u16 ret; ++ mb(); ++ ret = __raw_readw(addr); + mb(); + return ret; + } + + u32 readl(const volatile void __iomem *addr) + { +- u32 ret = __raw_readl(addr); ++ u32 ret; ++ mb(); ++ ret = __raw_readl(addr); + mb(); + return ret; + } + + u64 readq(const volatile void __iomem *addr) + { +- u64 ret = __raw_readq(addr); ++ u64 ret; ++ mb(); ++ ret = __raw_readq(addr); + mb(); + return ret; + } +@@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew); + EXPORT_SYMBOL(writel); + EXPORT_SYMBOL(writeq); + ++/* ++ * The _relaxed functions must be ordered w.r.t. each other, but they don't ++ * have to be ordered w.r.t. other memory accesses. 
++ */ ++u8 readb_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readb(addr); ++} ++ ++u16 readw_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readw(addr); ++} ++ ++u32 readl_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readl(addr); ++} ++ ++u64 readq_relaxed(const volatile void __iomem *addr) ++{ ++ mb(); ++ return __raw_readq(addr); ++} ++ ++EXPORT_SYMBOL(readb_relaxed); ++EXPORT_SYMBOL(readw_relaxed); ++EXPORT_SYMBOL(readl_relaxed); ++EXPORT_SYMBOL(readq_relaxed); + + /* + * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. +diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +index 1333a68b9373..b8db77b7f5d8 100644 +--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts ++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +@@ -40,7 +40,7 @@ + + ahb { + usb0: gadget@300000 { +- atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>; ++ atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; +diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi +index ce87d2ff27aa..4b9c4cab0314 100644 +--- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi ++++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi +@@ -68,7 +68,7 @@ + + i2c_cm36651: i2c-gpio-2 { + compatible = "i2c-gpio"; +- gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>; ++ gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>; + i2c-gpio,delay-us = <2>; + #address-cells = <1>; + #size-cells = <0>; +diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi +index 8ff70b856334..d419b77201f7 100644 +--- a/arch/arm/boot/dts/s5pv210-aries.dtsi ++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi +@@ -454,6 +454,7 @@ + pinctrl-names = "default"; + cap-sd-highspeed; + cap-mmc-highspeed; ++ keep-power-in-suspend; + + mmc-pwrseq = <&wifi_pwrseq>; + non-removable; +diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c +index e512e606eabd..5ea3421fa1e8 100644 +--- a/arch/arm/mach-tegra/tegra.c ++++ b/arch/arm/mach-tegra/tegra.c +@@ -106,8 +106,8 @@ static const char * const tegra_dt_board_compat[] = { + }; + + DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") +- .l2c_aux_val = 0x3c400001, +- .l2c_aux_mask = 0xc20fc3fe, ++ .l2c_aux_val = 0x3c400000, ++ .l2c_aux_mask = 0xc20fc3ff, + .smp = smp_ops(tegra_smp_ops), + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index 5461d589a1e2..60ac7c5999a9 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -5,6 +5,7 @@ + * VMA_VM_FLAGS + * VM_EXEC + */ ++#include + #include + #include + +@@ -30,7 +31,7 @@ + * act_mm - get current->active_mm + */ + .macro act_mm, rd +- bic \rd, sp, #8128 ++ bic \rd, sp, #(THREAD_SIZE - 1) & ~63 + bic \rd, \rd, #63 + ldr \rd, [\rd, #TI_TASK] + .if (TSK_ACTIVE_MM > IMM12_MASK) +diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h +index 665c78e0665a..3e7dda6f1ab1 100644 +--- a/arch/arm64/include/asm/cacheflush.h ++++ b/arch/arm64/include/asm/cacheflush.h +@@ -79,7 +79,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) + * IPI all online CPUs so that they undergo a context synchronization + * event and are forced to refetch the new instructions. 
+ */ +-#ifdef CONFIG_KGDB ++ + /* + * KGDB performs cache maintenance with interrupts disabled, so we + * will deadlock trying to IPI the secondary CPUs. In theory, we can +@@ -89,9 +89,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) + * the patching operation, so we don't need extra IPIs here anyway. + * In which case, add a KGDB-specific bodge and return early. + */ +- if (kgdb_connected && irqs_disabled()) ++ if (in_dbg_master()) + return; +-#endif ++ + kick_all_cpus_sync(); + } + +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 13ebe2bad79f..41dd4b1f0ccb 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -456,6 +456,7 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD]; + extern pgd_t init_pg_end[]; + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; ++extern pgd_t idmap_pg_end[]; + extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; + + extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 989b1944cb71..bdb5ec341900 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -393,13 +393,19 @@ __create_page_tables: + + /* + * Since the page tables have been populated with non-cacheable +- * accesses (MMU disabled), invalidate the idmap and swapper page +- * tables again to remove any speculatively loaded cache lines. ++ * accesses (MMU disabled), invalidate those tables again to ++ * remove any speculatively loaded cache lines. + */ ++ dmb sy ++ + adrp x0, idmap_pg_dir ++ adrp x1, idmap_pg_end ++ sub x1, x1, x0 ++ bl __inval_dcache_area ++ ++ adrp x0, init_pg_dir + adrp x1, init_pg_end + sub x1, x1, x0 +- dmb sy + bl __inval_dcache_area + + ret x28 +diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c +index d801a7094076..a612da533ea2 100644 +--- a/arch/arm64/kernel/insn.c ++++ b/arch/arm64/kernel/insn.c +@@ -1508,16 +1508,10 @@ static u32 aarch64_encode_immediate(u64 imm, + u32 insn) + { + unsigned int immr, imms, n, ones, ror, esz, tmp; +- u64 mask = ~0UL; +- +- /* Can't encode full zeroes or full ones */ +- if (!imm || !~imm) +- return AARCH64_BREAK_FAULT; ++ u64 mask; + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: +- if (upper_32_bits(imm)) +- return AARCH64_BREAK_FAULT; + esz = 32; + break; + case AARCH64_INSN_VARIANT_64BIT: +@@ -1529,6 +1523,12 @@ static u32 aarch64_encode_immediate(u64 imm, + return AARCH64_BREAK_FAULT; + } + ++ mask = GENMASK(esz - 1, 0); ++ ++ /* Can't encode full zeroes, full ones, or value wider than the mask */ ++ if (!imm || imm == mask || imm & ~mask) ++ return AARCH64_BREAK_FAULT; ++ + /* + * Inverse of Replicate(). Try to spot a repeating pattern + * with a pow2 stride. +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S +index aa76f7259668..e1af25dbc57e 100644 +--- a/arch/arm64/kernel/vmlinux.lds.S ++++ b/arch/arm64/kernel/vmlinux.lds.S +@@ -142,6 +142,7 @@ SECTIONS + . = ALIGN(PAGE_SIZE); + idmap_pg_dir = .; + . 
+= IDMAP_DIR_SIZE; ++ idmap_pg_end = .; + + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; +diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h +index de1470c4d829..1149251ea58d 100644 +--- a/arch/m68k/include/asm/mac_via.h ++++ b/arch/m68k/include/asm/mac_via.h +@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping; + + struct irq_desc; + ++extern void via_l2_flush(int writeback); + extern void via_register_interrupts(void); + extern void via_irq_enable(int); + extern void via_irq_disable(int); +diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c +index 611f73bfc87c..d0126ab01360 100644 +--- a/arch/m68k/mac/config.c ++++ b/arch/m68k/mac/config.c +@@ -59,7 +59,6 @@ extern void iop_preinit(void); + extern void iop_init(void); + extern void via_init(void); + extern void via_init_clock(irq_handler_t func); +-extern void via_flush_cache(void); + extern void oss_init(void); + extern void psc_init(void); + extern void baboon_init(void); +@@ -130,21 +129,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) + return unknown; + } + +-/* +- * Flip into 24bit mode for an instant - flushes the L2 cache card. We +- * have to disable interrupts for this. Our IRQ handlers will crap +- * themselves if they take an IRQ in 24bit mode! +- */ +- +-static void mac_cache_card_flush(int writeback) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- via_flush_cache(); +- local_irq_restore(flags); +-} +- + void __init config_mac(void) + { + if (!MACH_IS_MAC) +@@ -175,9 +159,8 @@ void __init config_mac(void) + * not. + */ + +- if (macintosh_config->ident == MAC_MODEL_IICI +- || macintosh_config->ident == MAC_MODEL_IIFX) +- mach_l2_flush = mac_cache_card_flush; ++ if (macintosh_config->ident == MAC_MODEL_IICI) ++ mach_l2_flush = via_l2_flush; + } + + +diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c +index 3c2cfcb74982..1f0fad2a98a0 100644 +--- a/arch/m68k/mac/via.c ++++ b/arch/m68k/mac/via.c +@@ -294,10 +294,14 @@ void via_debug_dump(void) + * the system into 24-bit mode for an instant. + */ + +-void via_flush_cache(void) ++void via_l2_flush(int writeback) + { ++ unsigned long flags; ++ ++ local_irq_save(flags); + via2[gBufB] &= ~VIA2B_vMode32; + via2[gBufB] |= VIA2B_vMode32; ++ local_irq_restore(flags); + } + + /* +diff --git a/arch/mips/Makefile b/arch/mips/Makefile +index cdc09b71febe..5403a91ce098 100644 +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -285,12 +285,23 @@ ifdef CONFIG_64BIT + endif + endif + ++# When linking a 32-bit executable the LLVM linker cannot cope with a ++# 32-bit load address that has been sign-extended to 64 bits. Simply ++# remove the upper 32 bits then, as it is safe to do so with other ++# linkers. 
++ifdef CONFIG_64BIT ++ load-ld = $(load-y) ++else ++ load-ld = $(subst 0xffffffff,0x,$(load-y)) ++endif ++ + KBUILD_AFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(cflags-y) +-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) ++KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) + KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) + + bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ ++ LINKER_LOAD_ADDRESS=$(load-ld) \ + VMLINUX_ENTRY_ADDRESS=$(entry-y) \ + PLATFORM="$(platform-y)" \ + ITS_INPUTS="$(its-y)" +diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile +index d859f079b771..378cbfb31ee7 100644 +--- a/arch/mips/boot/compressed/Makefile ++++ b/arch/mips/boot/compressed/Makefile +@@ -90,7 +90,7 @@ ifneq ($(zload-y),) + VMLINUZ_LOAD_ADDRESS := $(zload-y) + else + VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ +- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) ++ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) + endif + UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS) + +diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig +index 90ee0084d786..e41f4841cb4d 100644 +--- a/arch/mips/configs/loongson3_defconfig ++++ b/arch/mips/configs/loongson3_defconfig +@@ -231,7 +231,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y + CONFIG_MEDIA_USB_SUPPORT=y + CONFIG_USB_VIDEO_CLASS=m + CONFIG_DRM=y +-CONFIG_DRM_RADEON=y ++CONFIG_DRM_RADEON=m + CONFIG_FB_RADEON=y + CONFIG_LCD_CLASS_DEVICE=y + CONFIG_LCD_PLATFORM=m +diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h +index 983a6a7f43a1..3e26b0c7391b 100644 +--- a/arch/mips/include/asm/cpu-features.h ++++ b/arch/mips/include/asm/cpu-features.h +@@ -288,10 +288,12 @@ + # define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6) + #endif + #ifndef cpu_has_mips64r1 +-# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1) ++# define cpu_has_mips64r1 (cpu_has_64bits && \ ++ __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)) + #endif + #ifndef cpu_has_mips64r2 +-# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2) ++# define cpu_has_mips64r2 (cpu_has_64bits && \ ++ __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)) + #endif + #ifndef cpu_has_mips64r6 + # define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6) +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h +index bdbdc19a2b8f..3afdb39d092a 100644 +--- a/arch/mips/include/asm/mipsregs.h ++++ b/arch/mips/include/asm/mipsregs.h +@@ -750,7 +750,7 @@ + + /* MAAR bit definitions */ + #define MIPS_MAAR_VH (_U64CAST_(1) << 63) +-#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) ++#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12) + #define MIPS_MAAR_ADDR_SHIFT 12 + #define MIPS_MAAR_S (_ULCAST_(1) << 1) + #define MIPS_MAAR_VL (_ULCAST_(1) << 0) +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S +index efde27c99414..9c5f8a5d097f 100644 +--- a/arch/mips/kernel/genex.S ++++ b/arch/mips/kernel/genex.S +@@ -474,20 +474,20 @@ NESTED(nmi_handler, PT_SIZE, sp) + .endm + + .macro __build_clear_fpe ++ CLI ++ TRACE_IRQS_OFF + .set push + /* gas fails to assemble cfc1 for some archs (octeon).*/ \ + .set mips1 + SET_HARDFLOAT + cfc1 a1, fcr31 + .set pop +- CLI +- TRACE_IRQS_OFF + .endm + + .macro __build_clear_msa_fpe +- _cfcmsa a1, MSA_CSR + CLI + TRACE_IRQS_OFF ++ _cfcmsa a1, MSA_CSR + .endm + + .macro __build_clear_ade +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c 
+index e5ea3db23d6b..a9eab83d9148 100644 +--- a/arch/mips/kernel/mips-cm.c ++++ b/arch/mips/kernel/mips-cm.c +@@ -119,9 +119,9 @@ static char *cm2_causes[32] = { + "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", + "0x08", "0x09", "0x0a", "0x0b", + "0x0c", "0x0d", "0x0e", "0x0f", +- "0x10", "0x11", "0x12", "0x13", +- "0x14", "0x15", "0x16", "INTVN_WR_ERR", +- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", ++ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", ++ "0x14", "0x15", "0x16", "0x17", ++ "0x18", "0x19", "0x1a", "0x1b", + "0x1c", "0x1d", "0x1e", "0x1f" + }; + +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c +index 5eec13b8d222..7b06e6ee6817 100644 +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -653,7 +653,17 @@ static void __init arch_mem_init(char **cmdline_p) + crashk_res.end - crashk_res.start + 1); + #endif + device_tree_init(); ++ ++ /* ++ * In order to reduce the possibility of kernel panic when failed to ++ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate ++ * low memory as small as possible before plat_swiotlb_setup(), so ++ * make sparse_init() using top-down allocation. ++ */ ++ memblock_set_bottom_up(false); + sparse_init(); ++ memblock_set_bottom_up(true); ++ + plat_swiotlb_setup(); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c +index 37e9413a393d..caa01457dce6 100644 +--- a/arch/mips/kernel/time.c ++++ b/arch/mips/kernel/time.c +@@ -18,12 +18,82 @@ + #include + #include + #include ++#include ++#include + + #include + #include + #include + #include + ++#ifdef CONFIG_CPU_FREQ ++ ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); ++static unsigned long glb_lpj_ref; ++static unsigned long glb_lpj_ref_freq; ++ ++static int cpufreq_callback(struct notifier_block *nb, ++ unsigned long val, void *data) ++{ ++ struct cpufreq_freqs *freq = data; ++ struct cpumask *cpus = freq->policy->cpus; ++ unsigned long lpj; ++ int cpu; ++ ++ /* ++ * Skip lpj numbers adjustment if the CPU-freq transition is safe for ++ * the loops delay. (Is this possible?) ++ */ ++ if (freq->flags & CPUFREQ_CONST_LOOPS) ++ return NOTIFY_OK; ++ ++ /* Save the initial values of the lpjes for future scaling. */ ++ if (!glb_lpj_ref) { ++ glb_lpj_ref = boot_cpu_data.udelay_val; ++ glb_lpj_ref_freq = freq->old; ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(pcp_lpj_ref, cpu) = ++ cpu_data[cpu].udelay_val; ++ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; ++ } ++ } ++ ++ /* ++ * Adjust global lpj variable and per-CPU udelay_val number in ++ * accordance with the new CPU frequency. 
++ */ ++ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || ++ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { ++ loops_per_jiffy = cpufreq_scale(glb_lpj_ref, ++ glb_lpj_ref_freq, ++ freq->new); ++ ++ for_each_cpu(cpu, cpus) { ++ lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), ++ per_cpu(pcp_lpj_ref_freq, cpu), ++ freq->new); ++ cpu_data[cpu].udelay_val = (unsigned int)lpj; ++ } ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block cpufreq_notifier = { ++ .notifier_call = cpufreq_callback, ++}; ++ ++static int __init register_cpufreq_notifier(void) ++{ ++ return cpufreq_register_notifier(&cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++} ++core_initcall(register_cpufreq_notifier); ++ ++#endif /* CONFIG_CPU_FREQ */ ++ + /* + * forward reference + */ +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S +index 33ee0d18fb0a..eb9d7af93836 100644 +--- a/arch/mips/kernel/vmlinux.lds.S ++++ b/arch/mips/kernel/vmlinux.lds.S +@@ -50,7 +50,7 @@ SECTIONS + /* . = 0xa800000000300000; */ + . = 0xffffffff80300000; + #endif +- . = VMLINUX_LOAD_ADDRESS; ++ . = LINKER_LOAD_ADDRESS; + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { +diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c +index adde79ce7fc0..dbd14ff05b4c 100644 +--- a/arch/mips/tools/elf-entry.c ++++ b/arch/mips/tools/elf-entry.c +@@ -51,11 +51,14 @@ int main(int argc, const char *argv[]) + nread = fread(&hdr, 1, sizeof(hdr), file); + if (nread != sizeof(hdr)) { + perror("Unable to read input file"); ++ fclose(file); + return EXIT_FAILURE; + } + +- if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) ++ if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) { ++ fclose(file); + die("Input is not an ELF\n"); ++ } + + switch (hdr.ehdr32.e_ident[EI_CLASS]) { + case ELFCLASS32: +@@ -67,6 +70,7 @@ int main(int argc, const char *argv[]) + entry = be32toh(hdr.ehdr32.e_entry); + break; + default: ++ fclose(file); + die("Invalid ELF encoding\n"); + } + +@@ -83,14 +87,17 @@ int main(int argc, const char *argv[]) + entry = be64toh(hdr.ehdr64.e_entry); + break; + default: ++ fclose(file); + die("Invalid ELF encoding\n"); + } + break; + + default: ++ fclose(file); + die("Invalid ELF class\n"); + } + + printf("0x%016" PRIx64 "\n", entry); ++ fclose(file); + return EXIT_SUCCESS; + } +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index 3dc5aecdd853..44431dc06982 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -171,7 +171,7 @@ config PPC + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU + select HAVE_ARCH_JUMP_LABEL +- select HAVE_ARCH_KASAN if PPC32 ++ select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 + select HAVE_ARCH_KGDB + select HAVE_ARCH_MMAP_RND_BITS + select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT +diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h +index 1a2c80e8be84..6eb311eb818b 100644 +--- a/arch/powerpc/include/asm/book3s/32/kup.h ++++ b/arch/powerpc/include/asm/book3s/32/kup.h +@@ -2,6 +2,7 @@ + #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H + #define _ASM_POWERPC_BOOK3S_32_KUP_H + ++#include + #include + + #ifdef __ASSEMBLY__ +@@ -75,7 +76,7 @@ + + .macro kuap_check current, gpr + #ifdef CONFIG_PPC_KUAP_DEBUG +- lwz \gpr, KUAP(thread) ++ lwz \gpr, THREAD + KUAP(\current) + 999: twnei \gpr, 0 + EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) + #endif +diff --git a/arch/powerpc/include/asm/fadump-internal.h 
b/arch/powerpc/include/asm/fadump-internal.h +index c814a2b55389..8d61c8f3fec4 100644 +--- a/arch/powerpc/include/asm/fadump-internal.h ++++ b/arch/powerpc/include/asm/fadump-internal.h +@@ -64,12 +64,14 @@ struct fadump_memory_range { + }; + + /* fadump memory ranges info */ ++#define RNG_NAME_SZ 16 + struct fadump_mrange_info { +- char name[16]; ++ char name[RNG_NAME_SZ]; + struct fadump_memory_range *mem_ranges; + u32 mem_ranges_sz; + u32 mem_range_cnt; + u32 max_mem_ranges; ++ bool is_static; + }; + + /* Platform specific callback functions */ +diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h +index 296e51c2f066..b68eeff77806 100644 +--- a/arch/powerpc/include/asm/kasan.h ++++ b/arch/powerpc/include/asm/kasan.h +@@ -23,17 +23,13 @@ + + #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) + +-#define KASAN_SHADOW_END 0UL +- +-#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START) ++#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT)) + + #ifdef CONFIG_KASAN + void kasan_early_init(void); +-void kasan_mmu_init(void); + void kasan_init(void); + #else + static inline void kasan_init(void) { } +-static inline void kasan_mmu_init(void) { } + #endif + + #endif /* __ASSEMBLY */ +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c +index 05606025a131..3551f11accf0 100644 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c +@@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) + { + u64 lpcr; + ++ /* ++ * Linux relies on FSCR[DSCR] being clear, so that we can take the ++ * facility unavailable interrupt and track the task's usage of DSCR. ++ * See facility_unavailable_exception(). ++ * Clear the bit here so that feat_enable() doesn't set it. ++ */ ++ f->fscr_bit_nr = -1; ++ + feat_enable(f); + + lpcr = mfspr(SPRN_LPCR); +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c +index ed59855430b9..9b522152d8f0 100644 +--- a/arch/powerpc/kernel/fadump.c ++++ b/arch/powerpc/kernel/fadump.c +@@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base); + + #ifndef CONFIG_PRESERVE_FA_DUMP + static DEFINE_MUTEX(fadump_mutex); +-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 }; +-struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 }; ++struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; ++ ++#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ ++#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ ++ sizeof(struct fadump_memory_range)) ++static struct fadump_memory_range rngs[RESERVED_RNGS_CNT]; ++struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs, ++ RESERVED_RNGS_SZ, 0, ++ RESERVED_RNGS_CNT, true }; ++ ++static void __init early_init_dt_scan_reserved_ranges(unsigned long node); + + #ifdef CONFIG_CMA + static struct cma *fadump_cma; +@@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; } + int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, + int depth, void *data) + { ++ if (depth == 0) { ++ early_init_dt_scan_reserved_ranges(node); ++ return 0; ++ } ++ + if (depth != 1) + return 0; + +@@ -429,10 +443,72 @@ static int __init fadump_get_boot_mem_regions(void) + return ret; + } + ++/* ++ * Returns true, if the given range overlaps with reserved memory ranges ++ * starting at idx. Also, updates idx to index of overlapping memory range ++ * with the given memory range. 
++ * False, otherwise. ++ */ ++static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx) ++{ ++ bool ret = false; ++ int i; ++ ++ for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) { ++ u64 rbase = reserved_mrange_info.mem_ranges[i].base; ++ u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size; ++ ++ if (end <= rbase) ++ break; ++ ++ if ((end > rbase) && (base < rend)) { ++ *idx = i; ++ ret = true; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++/* ++ * Locate a suitable memory area to reserve memory for FADump. While at it, ++ * lookup reserved-ranges & avoid overlap with them, as they are used by F/W. ++ */ ++static u64 __init fadump_locate_reserve_mem(u64 base, u64 size) ++{ ++ struct fadump_memory_range *mrngs; ++ phys_addr_t mstart, mend; ++ int idx = 0; ++ u64 i, ret = 0; ++ ++ mrngs = reserved_mrange_info.mem_ranges; ++ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, ++ &mstart, &mend, NULL) { ++ pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n", ++ i, mstart, mend, base); ++ ++ if (mstart > base) ++ base = PAGE_ALIGN(mstart); ++ ++ while ((mend > base) && ((mend - base) >= size)) { ++ if (!overlaps_reserved_ranges(base, base+size, &idx)) { ++ ret = base; ++ goto out; ++ } ++ ++ base = mrngs[idx].base + mrngs[idx].size; ++ base = PAGE_ALIGN(base); ++ } ++ } ++ ++out: ++ return ret; ++} ++ + int __init fadump_reserve_mem(void) + { +- u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE; +- bool is_memblock_bottom_up = memblock_bottom_up(); ++ u64 base, size, mem_boundary, bootmem_min; + int ret = 1; + + if (!fw_dump.fadump_enabled) +@@ -453,9 +529,9 @@ int __init fadump_reserve_mem(void) + PAGE_ALIGN(fadump_calculate_reserve_size()); + #ifdef CONFIG_CMA + if (!fw_dump.nocma) { +- align = FADUMP_CMA_ALIGNMENT; + fw_dump.boot_memory_size = +- ALIGN(fw_dump.boot_memory_size, align); ++ ALIGN(fw_dump.boot_memory_size, ++ FADUMP_CMA_ALIGNMENT); + } + #endif + +@@ -523,13 +599,9 @@ int __init fadump_reserve_mem(void) + * Reserve memory at an offset closer to bottom of the RAM to + * minimize the impact of memory hot-remove operation. 
+ */ +- memblock_set_bottom_up(true); +- base = memblock_find_in_range(base, mem_boundary, size, align); ++ base = fadump_locate_reserve_mem(base, size); + +- /* Restore the previous allocation mode */ +- memblock_set_bottom_up(is_memblock_bottom_up); +- +- if (!base) { ++ if (!base || (base + size > mem_boundary)) { + pr_err("Failed to find memory chunk for reservation!\n"); + goto error_out; + } +@@ -726,10 +798,14 @@ void fadump_free_cpu_notes_buf(void) + + static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) + { ++ if (mrange_info->is_static) { ++ mrange_info->mem_range_cnt = 0; ++ return; ++ } ++ + kfree(mrange_info->mem_ranges); +- mrange_info->mem_ranges = NULL; +- mrange_info->mem_ranges_sz = 0; +- mrange_info->max_mem_ranges = 0; ++ memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0, ++ (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ)); + } + + /* +@@ -786,6 +862,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, + if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { + int ret; + ++ if (mrange_info->is_static) { ++ pr_err("Reached array size limit for %s memory ranges\n", ++ mrange_info->name); ++ return -ENOSPC; ++ } ++ + ret = fadump_alloc_mem_ranges(mrange_info); + if (ret) + return ret; +@@ -1202,20 +1284,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) + * Scan reserved-ranges to consider them while reserving/releasing + * memory for FADump. + */ +-static inline int fadump_scan_reserved_mem_ranges(void) ++static void __init early_init_dt_scan_reserved_ranges(unsigned long node) + { +- struct device_node *root; + const __be32 *prop; + int len, ret = -1; + unsigned long i; + +- root = of_find_node_by_path("/"); +- if (!root) +- return ret; ++ /* reserved-ranges already scanned */ ++ if (reserved_mrange_info.mem_range_cnt != 0) ++ return; + +- prop = of_get_property(root, "reserved-ranges", &len); ++ prop = of_get_flat_dt_prop(node, "reserved-ranges", &len); + if (!prop) +- return ret; ++ return; + + /* + * Each reserved range is an (address,size) pair, 2 cells each, +@@ -1237,7 +1318,8 @@ static inline int fadump_scan_reserved_mem_ranges(void) + } + } + +- return ret; ++ /* Compact reserved ranges */ ++ sort_and_merge_mem_ranges(&reserved_mrange_info); + } + + /* +@@ -1251,32 +1333,21 @@ static void fadump_release_memory(u64 begin, u64 end) + u64 ra_start, ra_end, tstart; + int i, ret; + +- fadump_scan_reserved_mem_ranges(); +- + ra_start = fw_dump.reserve_dump_area_start; + ra_end = ra_start + fw_dump.reserve_dump_area_size; + + /* +- * Add reserved dump area to reserved ranges list +- * and exclude all these ranges while releasing memory. ++ * If reserved ranges array limit is hit, overwrite the last reserved ++ * memory range with reserved dump area to ensure it is excluded from ++ * the memory being released (reused for next FADump registration). + */ +- ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); +- if (ret != 0) { +- /* +- * Not enough memory to setup reserved ranges but the system is +- * running shortage of memory. So, release all the memory except +- * Reserved dump area (reused for next fadump registration). 
+- */ +- if (begin < ra_end && end > ra_start) { +- if (begin < ra_start) +- fadump_release_reserved_area(begin, ra_start); +- if (end > ra_end) +- fadump_release_reserved_area(ra_end, end); +- } else +- fadump_release_reserved_area(begin, end); ++ if (reserved_mrange_info.mem_range_cnt == ++ reserved_mrange_info.max_mem_ranges) ++ reserved_mrange_info.mem_range_cnt--; + ++ ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); ++ if (ret != 0) + return; +- } + + /* Get the reserved ranges list in order first. */ + sort_and_merge_mem_ranges(&reserved_mrange_info); +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c +index 6620f37abe73..e13e96e665e0 100644 +--- a/arch/powerpc/kernel/prom.c ++++ b/arch/powerpc/kernel/prom.c +@@ -685,6 +685,23 @@ static void __init tm_init(void) + static void tm_init(void) { } + #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + ++#ifdef CONFIG_PPC64 ++static void __init save_fscr_to_task(void) ++{ ++ /* ++ * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we ++ * have configured via the device tree features or via __init_FSCR(). ++ * That value will then be propagated to pid 1 (init) and all future ++ * processes. ++ */ ++ if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) ++ init_task.thread.fscr = mfspr(SPRN_FSCR); ++} ++#else ++static inline void save_fscr_to_task(void) {}; ++#endif ++ ++ + void __init early_init_devtree(void *params) + { + phys_addr_t limit; +@@ -773,6 +790,8 @@ void __init early_init_devtree(void *params) + BUG(); + } + ++ save_fscr_to_task(); ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) + /* We'll later wait for secondaries to check in; there are + * NCPUS-1 non-boot CPUs :-) +diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c +index b04896a88d79..68f7446193d1 100644 +--- a/arch/powerpc/mm/init_32.c ++++ b/arch/powerpc/mm/init_32.c +@@ -175,8 +175,6 @@ void __init MMU_init(void) + btext_unmap(); + #endif + +- kasan_mmu_init(); +- + setup_kup(); + + /* Shortly after that, the entire linear mapping will be available */ +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c +index 1cfe57b51d7e..b01d4b72eccf 100644 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c +@@ -129,7 +129,7 @@ static void __init kasan_remap_early_shadow_ro(void) + flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END); + } + +-void __init kasan_mmu_init(void) ++static void __init kasan_mmu_init(void) + { + int ret; + struct memblock_region *reg; +@@ -156,6 +156,8 @@ void __init kasan_mmu_init(void) + + void __init kasan_init(void) + { ++ kasan_mmu_init(); ++ + kasan_remap_early_shadow_ro(); + + clear_page(kasan_early_shadow_page); +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c +index 784cae9f5697..da9f722d9f16 100644 +--- a/arch/powerpc/mm/pgtable_32.c ++++ b/arch/powerpc/mm/pgtable_32.c +@@ -207,7 +207,7 @@ void mark_initmem_nx(void) + unsigned long numpages = PFN_UP((unsigned long)_einittext) - + PFN_DOWN((unsigned long)_sinittext); + +- if (v_block_mapped((unsigned long)_stext + 1)) ++ if (v_block_mapped((unsigned long)_sinittext)) + mmu_mark_initmem_nx(); + else + change_page_attr(page, numpages, PAGE_KERNEL); +@@ -219,7 +219,7 @@ void mark_rodata_ro(void) + struct page *page; + unsigned long numpages; + +- if (v_block_mapped((unsigned long)_sinittext)) { ++ if (v_block_mapped((unsigned long)_stext + 1)) { + mmu_mark_rodata_ro(); + ptdump_check_wx(); + return; +diff --git 
a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index c0f950a3f4e1..f4a4dfb191e7 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, + static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { +- int ret; + struct spu_context *ctx = file->private_data; ++ u32 stat, data; ++ int ret; + + if (!access_ok(buf, len)) + return -EFAULT; +@@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_mbox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.prob.pu_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the mbox */ ++ if (!(stat & 0x0000ff)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_mbox_info_fops = { +@@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ u32 stat, data; + int ret; + + if (!access_ok(buf, len)) +@@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_ibox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.priv2.puint_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the ibox */ ++ if (!(stat & 0xff0000)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_ibox_info_fops = { +@@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = { + .llseek = generic_file_llseek, + }; + ++static size_t spufs_wbox_info_cnt(struct spu_context *ctx) ++{ ++ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); ++} ++ + static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { +@@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + u32 wbox_stat; + + wbox_stat = ctx->csa.prob.mb_stat_R; +- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); ++ cnt = spufs_wbox_info_cnt(ctx); + for (i = 0; i < cnt; i++) { + data[i] = ctx->csa.spu_mailbox_data[i]; + } +@@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; +- int ret; ++ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; ++ int ret, count; + + if (!access_ok(buf, len)) + return -EFAULT; +@@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_wbox_info_read(ctx, buf, len, pos); ++ count = spufs_wbox_info_cnt(ctx); ++ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &data, ++ count * sizeof(u32)); + } + + static const struct file_operations spufs_wbox_info_fops = { +@@ -2084,27 +2104,33 @@ static const 
struct file_operations spufs_wbox_info_fops = { + .llseek = generic_file_llseek, + }; + +-static ssize_t __spufs_dma_info_read(struct spu_context *ctx, +- char __user *buf, size_t len, loff_t *pos) ++static void spufs_get_dma_info(struct spu_context *ctx, ++ struct spu_dma_info *info) + { +- struct spu_dma_info info; +- struct mfc_cq_sr *qp, *spuqp; + int i; + +- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; +- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; +- info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; +- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; +- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; ++ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; ++ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; ++ info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; ++ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; ++ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + for (i = 0; i < 16; i++) { +- qp = &info.dma_info_command_data[i]; +- spuqp = &ctx->csa.priv2.spuq[i]; ++ struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; ++ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; + + qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; + } ++} ++ ++static ssize_t __spufs_dma_info_read(struct spu_context *ctx, ++ char __user *buf, size_t len, loff_t *pos) ++{ ++ struct spu_dma_info info; ++ ++ spufs_get_dma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_dma_info info; + int ret; + + if (!access_ok(buf, len)) +@@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_dma_info_read(ctx, buf, len, pos); ++ spufs_get_dma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_dma_info_fops = { +@@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = { + .llseek = no_llseek, + }; + ++static void spufs_get_proxydma_info(struct spu_context *ctx, ++ struct spu_proxydma_info *info) ++{ ++ int i; ++ ++ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; ++ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; ++ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; ++ ++ for (i = 0; i < 8; i++) { ++ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; ++ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; ++ ++ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; ++ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; ++ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; ++ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; ++ } ++} ++ + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { + struct spu_proxydma_info info; +- struct mfc_cq_sr *qp, *puqp; + int ret = sizeof info; +- int i; + + if (len < ret) + return -EINVAL; +@@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + if (!access_ok(buf, len)) + return -EFAULT; 
+ +- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; +- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; +- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; +- for (i = 0; i < 8; i++) { +- qp = &info.proxydma_info_command_data[i]; +- puqp = &ctx->csa.priv2.puq[i]; +- +- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; +- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; +- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; +- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; +- } ++ spufs_get_proxydma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_proxydma_info info; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_proxydma_info_read(ctx, buf, len, pos); ++ spufs_get_proxydma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_proxydma_info_fops = { +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c +index 13e251699346..b2ba3e95bda7 100644 +--- a/arch/powerpc/platforms/powernv/smp.c ++++ b/arch/powerpc/platforms/powernv/smp.c +@@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void) + /* Standard hot unplug procedure */ + + idle_task_exit(); +- current->active_mm = NULL; /* for sanity */ + cpu = smp_processor_id(); + DBG("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c +index 16b50afe7b52..60f7205ebe40 100644 +--- a/arch/sparc/kernel/ptrace_32.c ++++ b/arch/sparc/kernel/ptrace_32.c +@@ -46,82 +46,79 @@ enum sparc_regset { + REGSET_FP, + }; + ++static int regwindow32_get(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_from_user(uregs, (void __user *)reg_window, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int regwindow32_set(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_to_user((void __user *)reg_window, uregs, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE | FOLL_WRITE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ + static int genregs32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + const struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- unsigned long *k = kbuf; +- unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- *k++ = regs->u_regs[pos++]; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 
&& pos < 32; count--) { +- if (get_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (put_user(regs->u_regs[pos++], u++)) +- return -EFAULT; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, ®_window[pos++]) || +- put_user(reg, u++)) +- return -EFAULT; +- } +- } +- while (count > 0) { +- switch (pos) { +- case 32: /* PSR */ +- reg = regs->psr; +- break; +- case 33: /* PC */ +- reg = regs->pc; +- break; +- case 34: /* NPC */ +- reg = regs->npc; +- break; +- case 35: /* Y */ +- reg = regs->y; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- reg = 0; +- break; +- default: +- goto finish; +- } ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- *k++ = reg; +- else if (put_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- pos++; +- count--; ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); + +- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ uregs[0] = regs->psr; ++ uregs[1] = regs->pc; ++ uregs[2] = regs->npc; ++ uregs[3] = regs->y; ++ uregs[4] = 0; /* WIM */ ++ uregs[5] = 0; /* TBR */ ++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 32 * sizeof(u32), 38 * sizeof(u32)); + } + + static int genregs32_set(struct task_struct *target, +@@ -130,82 +127,53 @@ static int genregs32_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- const unsigned long *k = kbuf; +- const unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ u32 psr; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- regs->u_regs[pos++] = *k++; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (put_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (get_user(reg, u++)) +- return -EFAULT; +- regs->u_regs[pos++] = reg; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, u++) || +- put_user(reg, ®_window[pos++])) +- return -EFAULT; +- } +- } +- while (count > 0) { +- unsigned long psr; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- reg = *k++; +- else if (get_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- +- switch (pos) { +- case 32: /* PSR */ +- psr = regs->psr; +- psr &= ~(PSR_ICC | PSR_SYSCALL); +- psr |= (reg & (PSR_ICC | PSR_SYSCALL)); +- regs->psr = psr; +- break; +- case 33: /* PC */ +- regs->pc = reg; +- break; +- case 34: /* NPC */ +- regs->npc = reg; +- break; +- case 35: /* Y */ +- regs->y = reg; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- break; +- default: +- goto finish; +- 
} +- +- pos++; +- count--; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret) ++ return ret; ++ if (regwindow32_set(target, regs, uregs)) ++ return -EFAULT; ++ if (!count) ++ return 0; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); +- ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ &psr, ++ 32 * sizeof(u32), 33 * sizeof(u32)); ++ if (ret) ++ return ret; ++ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | ++ (psr & (PSR_ICC | PSR_SYSCALL)); ++ if (!count) ++ return 0; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->pc, ++ 33 * sizeof(u32), 34 * sizeof(u32)); ++ if (ret || !count) ++ return ret; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->y, ++ 34 * sizeof(u32), 35 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ 35 * sizeof(u32), 38 * sizeof(u32)); + } + + static int fpregs32_get(struct task_struct *target, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index c9d41a96468f..3f5930bfab06 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -572,19 +572,13 @@ static int genregs32_get(struct task_struct *target, + for (; count > 0 && pos < 32; count--) { + if (access_process_vm(target, + (unsigned long) +- ®_window[pos], ++ ®_window[pos++], + ®, sizeof(reg), + FOLL_FORCE) + != sizeof(reg)) + return -EFAULT; +- if (access_process_vm(target, +- (unsigned long) u, +- ®, sizeof(reg), +- FOLL_FORCE | FOLL_WRITE) +- != sizeof(reg)) ++ if (put_user(reg, u++)) + return -EFAULT; +- pos++; +- u++; + } + } + } +@@ -684,12 +678,7 @@ static int genregs32_set(struct task_struct *target, + } + } else { + for (; count > 0 && pos < 32; count--) { +- if (access_process_vm(target, +- (unsigned long) +- u, +- ®, sizeof(reg), +- FOLL_FORCE) +- != sizeof(reg)) ++ if (get_user(reg, u++)) + return -EFAULT; + if (access_process_vm(target, + (unsigned long) +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 70ffce98c568..d7c0fcc1dbf9 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -49,16 +49,17 @@ + * Position Independent Executable (PIE) so that linker won't optimize + * R_386_GOT32X relocation to its fixed symbol address. Older + * linkers generate R_386_32 relocations against locally defined symbols, +- * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less ++ * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less + * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle + * R_386_32 relocations when relocating the kernel. 
To generate +- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as ++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as + * hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + ENTRY(startup_32) +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 07d2002da642..50c9eeb36f0d 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -42,6 +42,7 @@ + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + .code32 +diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h +index 27c47d183f4b..8b58d6975d5d 100644 +--- a/arch/x86/include/asm/smap.h ++++ b/arch/x86/include/asm/smap.h +@@ -57,8 +57,10 @@ static __always_inline unsigned long smap_save(void) + { + unsigned long flags; + +- asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC, +- X86_FEATURE_SMAP) ++ asm volatile ("# smap_save\n\t" ++ ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) ++ "pushf; pop %0; " __ASM_CLAC "\n\t" ++ "1:" + : "=rm" (flags) : : "memory", "cc"); + + return flags; +@@ -66,7 +68,10 @@ static __always_inline unsigned long smap_save(void) + + static __always_inline void smap_restore(unsigned long flags) + { +- asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP) ++ asm volatile ("# smap_restore\n\t" ++ ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) ++ "push %0; popf\n\t" ++ "1:" + : : "g" (flags) : "memory", "cc"); + } + +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c +index 251c795b4eb3..c4bc01da820e 100644 +--- a/arch/x86/kernel/amd_nb.c ++++ b/arch/x86/kernel/amd_nb.c +@@ -18,10 +18,13 @@ + #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 + #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 + #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 ++#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 + #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 + #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec + #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 ++#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c + #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 ++#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 + + /* Protect the PCI config register pairs used for SMN and DF indirect access. 
*/ + static DEFINE_MUTEX(smn_mutex); +@@ -32,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) }, + {} + }; + +@@ -50,8 +54,10 @@ const struct pci_device_id amd_nb_misc_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, + {} + }; + EXPORT_SYMBOL_GPL(amd_nb_misc_ids); +@@ -65,7 +71,9 @@ static const struct pci_device_id amd_nb_link_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, + {} + }; +diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c +index 12df3a4abfdd..6b32ab009c19 100644 +--- a/arch/x86/kernel/irq_64.c ++++ b/arch/x86/kernel/irq_64.c +@@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu) + pages[i] = pfn_to_page(pa >> PAGE_SHIFT); + } + +- va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); ++ va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); + if (!va) + return -ENOMEM; + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index fd10d91a6115..af352e228fa2 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num) + } else { + pfn = pgt_buf_end; + pgt_buf_end += num; +- printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", +- pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); + } + + for (i = 0; i < num; i++) { +diff --git a/block/blk-iocost.c b/block/blk-iocost.c +index d083f7704082..4d2bda812d9b 100644 +--- a/block/blk-iocost.c ++++ b/block/blk-iocost.c +@@ -1546,19 +1546,39 @@ skip_surplus_transfers: + if (rq_wait_pct > RQ_WAIT_BUSY_PCT || + missed_ppm[READ] > ppm_rthr || + missed_ppm[WRITE] > ppm_wthr) { ++ /* clearly missing QoS targets, slow down vrate */ + ioc->busy_level = max(ioc->busy_level, 0); + ioc->busy_level++; + } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && + missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && + missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) { +- /* take action iff there is contention */ +- if (nr_shortages && !nr_lagging) { ++ /* QoS targets are being met with >25% margin */ ++ if (nr_shortages) { ++ /* ++ * We're throttling while the device has spare ++ * capacity. If vrate was being slowed down, stop. ++ */ + ioc->busy_level = min(ioc->busy_level, 0); +- /* redistribute surpluses first */ +- if (!nr_surpluses) ++ ++ /* ++ * If there are IOs spanning multiple periods, wait ++ * them out before pushing the device harder. 
If ++ * there are surpluses, let redistribution work it ++ * out first. ++ */ ++ if (!nr_lagging && !nr_surpluses) + ioc->busy_level--; ++ } else { ++ /* ++ * Nobody is being throttled and the users aren't ++ * issuing enough IOs to saturate the device. We ++ * simply don't know how close the device is to ++ * saturation. Coast. ++ */ ++ ioc->busy_level = 0; + } + } else { ++ /* inside the hysterisis margin, we're good */ + ioc->busy_level = 0; + } + +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 757c0fd9f0cc..0550366e25d8 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2493,18 +2493,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) + * If the cpu isn't present, the cpu is mapped to first hctx. + */ + for_each_possible_cpu(i) { +- hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; +- /* unmapped hw queue can be remapped after CPU topo changed */ +- if (!set->tags[hctx_idx] && +- !__blk_mq_alloc_rq_map(set, hctx_idx)) { +- /* +- * If tags initialization fail for some hctx, +- * that hctx won't be brought online. In this +- * case, remap the current ctx to hctx[0] which +- * is guaranteed to always have tags allocated +- */ +- set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; +- } + + ctx = per_cpu_ptr(q->queue_ctx, i); + for (j = 0; j < set->nr_maps; j++) { +@@ -2513,6 +2501,18 @@ static void blk_mq_map_swqueue(struct request_queue *q) + HCTX_TYPE_DEFAULT, i); + continue; + } ++ hctx_idx = set->map[j].mq_map[i]; ++ /* unmapped hw queue can be remapped after CPU topo changed */ ++ if (!set->tags[hctx_idx] && ++ !__blk_mq_alloc_rq_map(set, hctx_idx)) { ++ /* ++ * If tags initialization fail for some hctx, ++ * that hctx won't be brought online. In this ++ * case, remap the current ctx to hctx[0] which ++ * is guaranteed to always have tags allocated ++ */ ++ set->map[j].mq_map[i] = 0; ++ } + + hctx = blk_mq_map_queue_type(q, j, i); + ctx->hctxs[j] = hctx; +@@ -3304,8 +3304,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, + + prev_nr_hw_queues = set->nr_hw_queues; + set->nr_hw_queues = nr_hw_queues; +- blk_mq_update_queue_map(set); + fallback: ++ blk_mq_update_queue_map(set); + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_realloc_hw_ctxs(set, q); + if (q->nr_hw_queues != set->nr_hw_queues) { +diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c +index 8438e33aa447..fd9028a6bc20 100644 +--- a/drivers/acpi/acpica/dsfield.c ++++ b/drivers/acpi/acpica/dsfield.c +@@ -518,13 +518,20 @@ acpi_ds_create_field(union acpi_parse_object *op, + info.region_node = region_node; + + status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); ++ if (ACPI_FAILURE(status)) { ++ return_ACPI_STATUS(status); ++ } ++ + if (info.region_node->object->region.space_id == +- ACPI_ADR_SPACE_PLATFORM_COMM +- && !(region_node->object->field.internal_pcc_buffer = +- ACPI_ALLOCATE_ZEROED(info.region_node->object->region. +- length))) { +- return_ACPI_STATUS(AE_NO_MEMORY); ++ ACPI_ADR_SPACE_PLATFORM_COMM) { ++ region_node->object->field.internal_pcc_buffer = ++ ACPI_ALLOCATE_ZEROED(info.region_node->object->region. 
++ length); ++ if (!region_node->object->field.internal_pcc_buffer) { ++ return_ACPI_STATUS(AE_NO_MEMORY); ++ } + } ++ + return_ACPI_STATUS(status); + } + +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c +index 5a7551d060f2..bc95a5eebd13 100644 +--- a/drivers/acpi/arm64/iort.c ++++ b/drivers/acpi/arm64/iort.c +@@ -361,6 +361,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, + static int iort_get_id_mapping_index(struct acpi_iort_node *node) + { + struct acpi_iort_smmu_v3 *smmu; ++ struct acpi_iort_pmcg *pmcg; + + switch (node->type) { + case ACPI_IORT_NODE_SMMU_V3: +@@ -388,6 +389,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) + + return smmu->id_mapping_index; + case ACPI_IORT_NODE_PMCG: ++ pmcg = (struct acpi_iort_pmcg *)node->node_data; ++ if (pmcg->overflow_gsiv || node->mapping_count == 0) ++ return -EINVAL; ++ + return 0; + default: + return -EINVAL; +diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c +index 6d7a522952bf..ccd900690b6f 100644 +--- a/drivers/acpi/evged.c ++++ b/drivers/acpi/evged.c +@@ -94,7 +94,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + trigger = p->triggering; + } else { + gsi = pext->interrupts[0]; +- trigger = p->triggering; ++ trigger = pext->triggering; + } + + irq = r.start; +diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c +index f02a4bdc0ca7..dd29d687cd38 100644 +--- a/drivers/bluetooth/btbcm.c ++++ b/drivers/bluetooth/btbcm.c +@@ -329,6 +329,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { + { 0x410e, "BCM43341B0" }, /* 002.001.014 */ + { 0x4204, "BCM2076B1" }, /* 002.002.004 */ + { 0x4406, "BCM4324B3" }, /* 002.004.006 */ ++ { 0x4606, "BCM4324B5" }, /* 002.006.006 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { 0x2122, "BCM4343A0" }, /* 001.001.034 */ +@@ -343,6 +344,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { + }; + + static const struct bcm_subver_table bcm_usb_subver_table[] = { ++ { 0x2105, "BCM20703A1" }, /* 001.001.005 */ + { 0x210b, "BCM43142A0" }, /* 001.001.011 */ + { 0x2112, "BCM4314A0" }, /* 001.001.018 */ + { 0x2118, "BCM20702A0" }, /* 001.001.024 */ +diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c +index e11169ad8247..8a81fbca5c9d 100644 +--- a/drivers/bluetooth/btmtkuart.c ++++ b/drivers/bluetooth/btmtkuart.c +@@ -1015,7 +1015,7 @@ static int btmtkuart_probe(struct serdev_device *serdev) + if (btmtkuart_is_standalone(bdev)) { + err = clk_prepare_enable(bdev->osc); + if (err < 0) +- return err; ++ goto err_hci_free_dev; + + if (bdev->boot) { + gpiod_set_value_cansleep(bdev->boot, 1); +@@ -1028,10 +1028,8 @@ static int btmtkuart_probe(struct serdev_device *serdev) + + /* Power on */ + err = regulator_enable(bdev->vcc); +- if (err < 0) { +- clk_disable_unprepare(bdev->osc); +- return err; +- } ++ if (err < 0) ++ goto err_clk_disable_unprepare; + + /* Reset if the reset-gpios is available otherwise the board + * -level design should be guaranteed. 
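The btmtkuart_probe() rework above is an instance of the usual goto-unwind error-handling idiom: each failure point jumps to a label that releases everything acquired so far, and the labels fall through in reverse order of acquisition. A minimal, self-contained userspace sketch of that idiom follows; the acquire_*/release_* helpers are made-up stand-ins for the real clk_prepare_enable()/regulator_enable()/hci_register_dev() calls, not part of the patch.

#include <stdio.h>

/* Hypothetical resource helpers standing in for the clk, regulator and
 * HCI registration steps in the probe above; each returns 0 on success. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* pretend the last step fails */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int probe_sketch(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

	/* Unwind in reverse order of acquisition; later labels fall through
	 * to earlier ones, so every failure point jumps to the first
	 * resource that actually needs releasing. */
err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}

The benefit over freeing at every return site is that each resource is released in exactly one place, so adding a new setup step only needs one new label rather than edits to every earlier error path.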
+@@ -1063,7 +1061,6 @@ static int btmtkuart_probe(struct serdev_device *serdev) + err = hci_register_dev(hdev); + if (err < 0) { + dev_err(&serdev->dev, "Can't register HCI device\n"); +- hci_free_dev(hdev); + goto err_regulator_disable; + } + +@@ -1072,6 +1069,11 @@ static int btmtkuart_probe(struct serdev_device *serdev) + err_regulator_disable: + if (btmtkuart_is_standalone(bdev)) + regulator_disable(bdev->vcc); ++err_clk_disable_unprepare: ++ if (btmtkuart_is_standalone(bdev)) ++ clk_disable_unprepare(bdev->osc); ++err_hci_free_dev: ++ hci_free_dev(hdev); + + return err; + } +diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c +index 7646636f2d18..94ed734c1d7e 100644 +--- a/drivers/bluetooth/hci_bcm.c ++++ b/drivers/bluetooth/hci_bcm.c +@@ -107,6 +107,7 @@ struct bcm_device { + u32 oper_speed; + int irq; + bool irq_active_low; ++ bool irq_acquired; + + #ifdef CONFIG_PM + struct hci_uart *hu; +@@ -319,6 +320,8 @@ static int bcm_request_irq(struct bcm_data *bcm) + goto unlock; + } + ++ bdev->irq_acquired = true; ++ + device_init_wakeup(bdev->dev, true); + + pm_runtime_set_autosuspend_delay(bdev->dev, +@@ -487,7 +490,7 @@ static int bcm_close(struct hci_uart *hu) + } + + if (bdev) { +- if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) { ++ if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) { + devm_free_irq(bdev->dev, bdev->irq, bdev); + device_init_wakeup(bdev->dev, false); + pm_runtime_disable(bdev->dev); +diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c +index 76f9cd039195..14e127e9a740 100644 +--- a/drivers/clk/mediatek/clk-mux.c ++++ b/drivers/clk/mediatek/clk-mux.c +@@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, + spinlock_t *lock) + { + struct mtk_clk_mux *clk_mux; +- struct clk_init_data init; ++ struct clk_init_data init = {}; + struct clk *clk; + + clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); +diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c +index 654766538f93..10ce69548f1b 100644 +--- a/drivers/clocksource/dw_apb_timer.c ++++ b/drivers/clocksource/dw_apb_timer.c +@@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta, + /** + * dw_apb_clockevent_init() - use an APB timer as a clock_event_device + * +- * @cpu: The CPU the events will be targeted at. ++ * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation ++ * isn't required. + * @name: The name used for the timer and the IRQ for it. + * @rating: The rating to give the timer. + * @base: I/O base for the timer registers. +@@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, + dw_ced->ced.max_delta_ticks = 0x7fffffff; + dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); + dw_ced->ced.min_delta_ticks = 5000; +- dw_ced->ced.cpumask = cpumask_of(cpu); ++ dw_ced->ced.cpumask = cpu < 0 ? 
cpu_possible_mask : cpumask_of(cpu); + dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; + dw_ced->ced.set_state_shutdown = apbt_shutdown; +diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c +index 8c28b127759f..6921b91b61ef 100644 +--- a/drivers/clocksource/dw_apb_timer_of.c ++++ b/drivers/clocksource/dw_apb_timer_of.c +@@ -147,10 +147,6 @@ static int num_called; + static int __init dw_apb_timer_init(struct device_node *timer) + { + switch (num_called) { +- case 0: +- pr_debug("%s: found clockevent timer\n", __func__); +- add_clockevent(timer); +- break; + case 1: + pr_debug("%s: found clocksource timer\n", __func__); + add_clocksource(timer); +@@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) + #endif + break; + default: ++ pr_debug("%s: found clockevent timer\n", __func__); ++ add_clockevent(timer); + break; + } + +diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c +index 2bb2683b493c..f8747322b3c7 100644 +--- a/drivers/cpuidle/sysfs.c ++++ b/drivers/cpuidle/sysfs.c +@@ -480,7 +480,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) + ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, + &kdev->kobj, "state%d", i); + if (ret) { +- kfree(kobj); ++ kobject_put(&kobj->kobj); + goto error_state; + } + cpuidle_add_s2idle_attr_group(kobj); +@@ -611,7 +611,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) + ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, + &kdev->kobj, "driver"); + if (ret) { +- kfree(kdrv); ++ kobject_put(&kdrv->kobj); + return ret; + } + +@@ -705,7 +705,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) + error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, + "cpuidle"); + if (error) { +- kfree(kdev); ++ kobject_put(&kdev->kobj); + return error; + } + +diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig +index 8fec733f567f..63e227adbb13 100644 +--- a/drivers/crypto/ccp/Kconfig ++++ b/drivers/crypto/ccp/Kconfig +@@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD + config CRYPTO_DEV_SP_CCP + bool "Cryptographic Coprocessor device" + default y +- depends on CRYPTO_DEV_CCP_DD ++ depends on CRYPTO_DEV_CCP_DD && DMADEVICES + select HW_RANDOM + select DMA_ENGINE +- select DMADEVICES + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help +diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c +index 01dd418bdadc..fe2eadc0ce83 100644 +--- a/drivers/crypto/chelsio/chcr_algo.c ++++ b/drivers/crypto/chelsio/chcr_algo.c +@@ -2818,7 +2818,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, + unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; + unsigned int c_id = a_ctx(tfm)->tx_chan_id; + unsigned int ccm_xtra; +- unsigned char tag_offset = 0, auth_offset = 0; ++ unsigned int tag_offset = 0, auth_offset = 0; + unsigned int assoclen; + + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) +diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c +index 9e11c3480353..e68b856d03b6 100644 +--- a/drivers/crypto/stm32/stm32-crc32.c ++++ b/drivers/crypto/stm32/stm32-crc32.c +@@ -28,8 +28,10 @@ + + /* Registers values */ + #define CRC_CR_RESET BIT(0) +-#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5)) +-#define CRC_INIT_DEFAULT 0xFFFFFFFF ++#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) ++#define CRC_CR_REV_IN_BYTE BIT(5) ++#define CRC_CR_REV_OUT BIT(7) ++#define CRC32C_INIT_DEFAULT 
0xFFFFFFFF + + #define CRC_AUTOSUSPEND_DELAY 50 + +@@ -38,8 +40,6 @@ struct stm32_crc { + struct device *dev; + void __iomem *regs; + struct clk *clk; +- u8 pending_data[sizeof(u32)]; +- size_t nb_pending_bytes; + }; + + struct stm32_crc_list { +@@ -59,14 +59,13 @@ struct stm32_crc_ctx { + + struct stm32_crc_desc_ctx { + u32 partial; /* crc32c: partial in first 4 bytes of that struct */ +- struct stm32_crc *crc; + }; + + static int stm32_crc32_cra_init(struct crypto_tfm *tfm) + { + struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); + +- mctx->key = CRC_INIT_DEFAULT; ++ mctx->key = 0; + mctx->poly = CRC32_POLY_LE; + return 0; + } +@@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) + { + struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); + +- mctx->key = CRC_INIT_DEFAULT; ++ mctx->key = CRC32C_INIT_DEFAULT; + mctx->poly = CRC32C_POLY_LE; + return 0; + } +@@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, + return 0; + } + ++static struct stm32_crc *stm32_crc_get_next_crc(void) ++{ ++ struct stm32_crc *crc; ++ ++ spin_lock_bh(&crc_list.lock); ++ crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); ++ if (crc) ++ list_move_tail(&crc->list, &crc_list.dev_list); ++ spin_unlock_bh(&crc_list.lock); ++ ++ return crc; ++} ++ + static int stm32_crc_init(struct shash_desc *desc) + { + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + +- spin_lock_bh(&crc_list.lock); +- list_for_each_entry(crc, &crc_list.dev_list, list) { +- ctx->crc = crc; +- break; +- } +- spin_unlock_bh(&crc_list.lock); ++ crc = stm32_crc_get_next_crc(); ++ if (!crc) ++ return -ENODEV; + +- pm_runtime_get_sync(ctx->crc->dev); ++ pm_runtime_get_sync(crc->dev); + + /* Reset, set key, poly and configure in bit reverse mode */ +- writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); +- writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); +- writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); ++ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); ++ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); ++ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, ++ crc->regs + CRC_CR); + + /* Store partial result */ +- ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); +- ctx->crc->nb_pending_bytes = 0; ++ ctx->partial = readl_relaxed(crc->regs + CRC_DR); + +- pm_runtime_mark_last_busy(ctx->crc->dev); +- pm_runtime_put_autosuspend(ctx->crc->dev); ++ pm_runtime_mark_last_busy(crc->dev); ++ pm_runtime_put_autosuspend(crc->dev); + + return 0; + } +@@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, + unsigned int length) + { + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); +- struct stm32_crc *crc = ctx->crc; +- u32 *d32; +- unsigned int i; ++ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); ++ struct stm32_crc *crc; ++ ++ crc = stm32_crc_get_next_crc(); ++ if (!crc) ++ return -ENODEV; + + pm_runtime_get_sync(crc->dev); + +- if (unlikely(crc->nb_pending_bytes)) { +- while (crc->nb_pending_bytes != sizeof(u32) && length) { +- /* Fill in pending data */ +- crc->pending_data[crc->nb_pending_bytes++] = *(d8++); ++ /* ++ * Restore previously calculated CRC for this context as init value ++ * Restore polynomial configuration ++ * Configure in register for word input data, ++ * Configure out register in reversed bit mode data. 
++ */ ++ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); ++ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); ++ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, ++ crc->regs + CRC_CR); ++ ++ if (d8 != PTR_ALIGN(d8, sizeof(u32))) { ++ /* Configure for byte data */ ++ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, ++ crc->regs + CRC_CR); ++ while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { ++ writeb_relaxed(*d8++, crc->regs + CRC_DR); + length--; + } +- +- if (crc->nb_pending_bytes == sizeof(u32)) { +- /* Process completed pending data */ +- writel_relaxed(*(u32 *)crc->pending_data, +- crc->regs + CRC_DR); +- crc->nb_pending_bytes = 0; +- } ++ /* Configure for word data */ ++ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, ++ crc->regs + CRC_CR); + } + +- d32 = (u32 *)d8; +- for (i = 0; i < length >> 2; i++) +- /* Process 32 bits data */ +- writel_relaxed(*(d32++), crc->regs + CRC_DR); ++ for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) ++ writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); ++ ++ if (length) { ++ /* Configure for byte data */ ++ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, ++ crc->regs + CRC_CR); ++ while (length--) ++ writeb_relaxed(*d8++, crc->regs + CRC_DR); ++ } + + /* Store partial result */ + ctx->partial = readl_relaxed(crc->regs + CRC_DR); +@@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); + +- /* Check for pending data (non 32 bits) */ +- length &= 3; +- if (likely(!length)) +- return 0; +- +- if ((crc->nb_pending_bytes + length) >= sizeof(u32)) { +- /* Shall not happen */ +- dev_err(crc->dev, "Pending data overflow\n"); +- return -EINVAL; +- } +- +- d8 = (const u8 *)d32; +- for (i = 0; i < length; i++) +- /* Store pending data */ +- crc->pending_data[crc->nb_pending_bytes++] = *(d8++); +- + return 0; + } + +@@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, + return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); + } + ++static unsigned int refcnt; ++static DEFINE_MUTEX(refcnt_lock); + static struct shash_alg algs[] = { + /* CRC-32 */ + { +@@ -294,12 +307,18 @@ static int stm32_crc_probe(struct platform_device *pdev) + list_add(&crc->list, &crc_list.dev_list); + spin_unlock(&crc_list.lock); + +- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); +- if (ret) { +- dev_err(dev, "Failed to register\n"); +- clk_disable_unprepare(crc->clk); +- return ret; ++ mutex_lock(&refcnt_lock); ++ if (!refcnt) { ++ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); ++ if (ret) { ++ mutex_unlock(&refcnt_lock); ++ dev_err(dev, "Failed to register\n"); ++ clk_disable_unprepare(crc->clk); ++ return ret; ++ } + } ++ refcnt++; ++ mutex_unlock(&refcnt_lock); + + dev_info(dev, "Initialized\n"); + +@@ -320,7 +339,10 @@ static int stm32_crc_remove(struct platform_device *pdev) + list_del(&crc->list); + spin_unlock(&crc_list.lock); + +- crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); ++ mutex_lock(&refcnt_lock); ++ if (!--refcnt) ++ crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); ++ mutex_unlock(&refcnt_lock); + + pm_runtime_disable(crc->dev); + pm_runtime_put_noidle(crc->dev); +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index cc5e56d752c8..ad7d2bce91cd 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2317,6 +2317,15 @@ static struct amd64_family_type family_types[] = { + 
.dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, ++ [F17_M60H_CPUS] = { ++ .ctl_name = "F17h_M60h", ++ .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0, ++ .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6, ++ .ops = { ++ .early_channel_count = f17_early_channel_count, ++ .dbam_to_cs = f17_addr_mask_to_cs_size, ++ } ++ }, + [F17_M70H_CPUS] = { + .ctl_name = "F17h_M70h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, +@@ -3366,6 +3375,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + break; ++ } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) { ++ fam_type = &family_types[F17_M60H_CPUS]; ++ pvt->ops = &family_types[F17_M60H_CPUS].ops; ++ break; + } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { + fam_type = &family_types[F17_M70H_CPUS]; + pvt->ops = &family_types[F17_M70H_CPUS].ops; +diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h +index 8c3cda81e619..d280b91f97cb 100644 +--- a/drivers/edac/amd64_edac.h ++++ b/drivers/edac/amd64_edac.h +@@ -120,6 +120,8 @@ + #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee + #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490 + #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 ++#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448 ++#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e + #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 + #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 + +@@ -291,6 +293,7 @@ enum amd_families { + F17_CPUS, + F17_M10H_CPUS, + F17_M30H_CPUS, ++ F17_M60H_CPUS, + F17_M70H_CPUS, + NUM_FAMILIES, + }; +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile +index ee0661ddb25b..8c5b5529dbc0 100644 +--- a/drivers/firmware/efi/libstub/Makefile ++++ b/drivers/firmware/efi/libstub/Makefile +@@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ + -D__NO_FORTIFY \ + $(call cc-option,-ffreestanding) \ + $(call cc-option,-fno-stack-protector) \ ++ $(call cc-option,-fno-addrsig) \ + -D__DISABLE_EXPORTS + + GCOV_PROFILE := n +diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c +index effed3a8d398..2ecb1d3e8eeb 100644 +--- a/drivers/gnss/sirf.c ++++ b/drivers/gnss/sirf.c +@@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev) + + data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff", + GPIOD_OUT_LOW); +- if (IS_ERR(data->on_off)) ++ if (IS_ERR(data->on_off)) { ++ ret = PTR_ERR(data->on_off); + goto err_put_device; ++ } + + if (data->on_off) { + data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup", + GPIOD_IN); +- if (IS_ERR(data->wakeup)) ++ if (IS_ERR(data->wakeup)) { ++ ret = PTR_ERR(data->wakeup); + goto err_put_device; ++ } + + ret = regulator_enable(data->vcc); + if (ret) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +index 8ceb44925947..5fa5158d18ee 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +@@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, + + struct amdgpu_bo_list_entry vm_pd; + struct list_head list, duplicates; ++ struct dma_fence *fence = NULL; + struct ttm_validate_buffer tv; + struct ww_acquire_ctx ticket; + struct amdgpu_bo_va *bo_va; +- int r; ++ long r; + + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); + + tv.bo = &bo->tbo; +- tv.num_shared = 1; ++ tv.num_shared = 2; + list_add(&tv.head, &list); + + amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); +@@ -178,28 +179,34 @@ void 
amdgpu_gem_object_close(struct drm_gem_object *obj, + r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false); + if (r) { + dev_err(adev->dev, "leaking bo va because " +- "we fail to reserve bo (%d)\n", r); ++ "we fail to reserve bo (%ld)\n", r); + return; + } + bo_va = amdgpu_vm_bo_find(vm, bo); +- if (bo_va && --bo_va->ref_count == 0) { +- amdgpu_vm_bo_rmv(adev, bo_va); +- +- if (amdgpu_vm_ready(vm)) { +- struct dma_fence *fence = NULL; ++ if (!bo_va || --bo_va->ref_count) ++ goto out_unlock; + +- r = amdgpu_vm_clear_freed(adev, vm, &fence); +- if (unlikely(r)) { +- dev_err(adev->dev, "failed to clear page " +- "tables on GEM object close (%d)\n", r); +- } ++ amdgpu_vm_bo_rmv(adev, bo_va); ++ if (!amdgpu_vm_ready(vm)) ++ goto out_unlock; + +- if (fence) { +- amdgpu_bo_fence(bo, fence, true); +- dma_fence_put(fence); +- } +- } ++ fence = dma_resv_get_excl(bo->tbo.base.resv); ++ if (fence) { ++ amdgpu_bo_fence(bo, fence, true); ++ fence = NULL; + } ++ ++ r = amdgpu_vm_clear_freed(adev, vm, &fence); ++ if (r || !fence) ++ goto out_unlock; ++ ++ amdgpu_bo_fence(bo, fence, true); ++ dma_fence_put(fence); ++ ++out_unlock: ++ if (unlikely(r < 0)) ++ dev_err(adev->dev, "failed to clear page " ++ "tables on GEM object close (%ld)\n", r); + ttm_eu_backoff_reservation(&ticket, &list); + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +index 51263b8d94b1..d1d2372ab7ca 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +@@ -370,6 +370,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, + if (current_level == level) + return count; + ++ if (adev->asic_type == CHIP_RAVEN) { ++ if (adev->rev_id < 8) { ++ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) ++ amdgpu_gfx_off_ctrl(adev, false); ++ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) ++ amdgpu_gfx_off_ctrl(adev, true); ++ } ++ } ++ + /* profile_exit setting is valid only when current mode is in profile mode */ + if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | +@@ -416,8 +425,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, + ret = smu_get_power_num_states(&adev->smu, &data); + if (ret) + return ret; +- } else if (adev->powerplay.pp_funcs->get_pp_num_states) ++ } else if (adev->powerplay.pp_funcs->get_pp_num_states) { + amdgpu_dpm_get_pp_num_states(adev, &data); ++ } else { ++ memset(&data, 0, sizeof(data)); ++ } + + buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); + for (i = 0; i < data.nums; i++) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +index c7514f743409..6335bd4ae374 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +@@ -2867,10 +2867,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns + WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + "CPU update of VM recommended only for large BAR system\n"); + +- if (vm->use_cpu_for_update) ++ if (vm->use_cpu_for_update) { ++ /* Sync with last SDMA update/clear before switching to CPU */ ++ r = amdgpu_bo_sync_wait(vm->root.base.bo, ++ AMDGPU_FENCE_OWNER_UNDEFINED, true); ++ if (r) ++ goto free_idr; ++ + vm->update_funcs = &amdgpu_vm_cpu_funcs; +- else ++ } else { + vm->update_funcs = &amdgpu_vm_sdma_funcs; ++ } + 
dma_fence_put(vm->last_update); + vm->last_update = NULL; + +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +index a428185be2c1..d05b3033b510 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, + { + switch (fs) { + case 32000: +- *n = 4096; ++ case 48000: ++ case 96000: ++ case 192000: ++ *n = fs * 128 / 1000; + break; + case 44100: +- *n = 6272; +- break; +- case 48000: +- *n = 6144; ++ case 88200: ++ case 176400: ++ *n = fs * 128 / 900; + break; + } + +diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c +index 35bb825d1918..8c8c92fc82e9 100644 +--- a/drivers/gpu/drm/mcde/mcde_dsi.c ++++ b/drivers/gpu/drm/mcde/mcde_dsi.c +@@ -940,10 +940,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, + panel = NULL; + + bridge = of_drm_find_bridge(child); +- if (IS_ERR(bridge)) { +- dev_err(dev, "failed to find bridge (%ld)\n", +- PTR_ERR(bridge)); +- return PTR_ERR(bridge); ++ if (!bridge) { ++ dev_err(dev, "failed to find bridge\n"); ++ return -EINVAL; + } + } + } +diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c +index be6d95c5ff25..48de07e9059e 100644 +--- a/drivers/gpu/drm/mediatek/mtk_dpi.c ++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c +@@ -10,7 +10,9 @@ + #include + #include + #include ++#include + #include ++#include + #include + #include + +@@ -73,6 +75,9 @@ struct mtk_dpi { + enum mtk_dpi_out_yc_map yc_map; + enum mtk_dpi_out_bit_num bit_num; + enum mtk_dpi_out_channel_swap channel_swap; ++ struct pinctrl *pinctrl; ++ struct pinctrl_state *pins_gpio; ++ struct pinctrl_state *pins_dpi; + int refcount; + }; + +@@ -378,6 +383,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) + if (--dpi->refcount != 0) + return; + ++ if (dpi->pinctrl && dpi->pins_gpio) ++ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); ++ + mtk_dpi_disable(dpi); + clk_disable_unprepare(dpi->pixel_clk); + clk_disable_unprepare(dpi->engine_clk); +@@ -402,6 +410,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) + goto err_pixel; + } + ++ if (dpi->pinctrl && dpi->pins_dpi) ++ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); ++ + mtk_dpi_enable(dpi); + return 0; + +@@ -689,6 +700,26 @@ static int mtk_dpi_probe(struct platform_device *pdev) + dpi->dev = dev; + dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev); + ++ dpi->pinctrl = devm_pinctrl_get(&pdev->dev); ++ if (IS_ERR(dpi->pinctrl)) { ++ dpi->pinctrl = NULL; ++ dev_dbg(&pdev->dev, "Cannot find pinctrl!\n"); ++ } ++ if (dpi->pinctrl) { ++ dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep"); ++ if (IS_ERR(dpi->pins_gpio)) { ++ dpi->pins_gpio = NULL; ++ dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n"); ++ } ++ if (dpi->pins_gpio) ++ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); ++ ++ dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default"); ++ if (IS_ERR(dpi->pins_dpi)) { ++ dpi->pins_dpi = NULL; ++ dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n"); ++ } ++ } + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dpi->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(dpi->regs)) { +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c +index c6430027169f..a0021fc25b27 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c +@@ -785,13 +785,15 @@ 
int rcar_du_planes_init(struct rcar_du_group *rgrp) + + drm_plane_create_alpha_property(&plane->plane); + +- if (type == DRM_PLANE_TYPE_PRIMARY) +- continue; +- +- drm_object_attach_property(&plane->plane.base, +- rcdu->props.colorkey, +- RCAR_DU_COLORKEY_NONE); +- drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); ++ if (type == DRM_PLANE_TYPE_PRIMARY) { ++ drm_plane_create_zpos_immutable_property(&plane->plane, ++ 0); ++ } else { ++ drm_object_attach_property(&plane->plane.base, ++ rcdu->props.colorkey, ++ RCAR_DU_COLORKEY_NONE); ++ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); ++ } + } + + return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +index 5e4faf258c31..f1a81c9b184d 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, + drm_plane_helper_add(&plane->plane, + &rcar_du_vsp_plane_helper_funcs); + +- if (type == DRM_PLANE_TYPE_PRIMARY) +- continue; +- +- drm_plane_create_alpha_property(&plane->plane); +- drm_plane_create_zpos_property(&plane->plane, 1, 1, +- vsp->num_planes - 1); ++ if (type == DRM_PLANE_TYPE_PRIMARY) { ++ drm_plane_create_zpos_immutable_property(&plane->plane, ++ 0); ++ } else { ++ drm_plane_create_alpha_property(&plane->plane); ++ drm_plane_create_zpos_property(&plane->plane, 1, 1, ++ vsp->num_planes - 1); ++ } + } + + return 0; +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c +index 6e4c015783ff..c90d79096e8c 100644 +--- a/drivers/hv/connection.c ++++ b/drivers/hv/connection.c +@@ -67,7 +67,6 @@ static __u32 vmbus_get_next_version(__u32 current_version) + int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) + { + int ret = 0; +- unsigned int cur_cpu; + struct vmbus_channel_initiate_contact *msg; + unsigned long flags; + +@@ -100,24 +99,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) + + msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); + msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); +- /* +- * We want all channel messages to be delivered on CPU 0. +- * This has been the behavior pre-win8. This is not +- * perf issue and having all channel messages delivered on CPU 0 +- * would be ok. +- * For post win8 hosts, we support receiving channel messagges on +- * all the CPUs. This is needed for kexec to work correctly where +- * the CPU attempting to connect may not be CPU 0. +- */ +- if (version >= VERSION_WIN8_1) { +- cur_cpu = get_cpu(); +- msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu); +- vmbus_connection.connect_cpu = cur_cpu; +- put_cpu(); +- } else { +- msg->target_vcpu = 0; +- vmbus_connection.connect_cpu = 0; +- } ++ msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU); + + /* + * Add to list before we send the request since we may +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index fcc52797c169..d6320022af15 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -249,6 +249,13 @@ int hv_synic_cleanup(unsigned int cpu) + bool channel_found = false; + unsigned long flags; + ++ /* ++ * Hyper-V does not provide a way to change the connect CPU once ++ * it is set; we must prevent the connect CPU from going offline. ++ */ ++ if (cpu == VMBUS_CONNECT_CPU) ++ return -EBUSY; ++ + /* + * Search for channels which are bound to the CPU we're about to + * cleanup. 
In case we find one and vmbus is still connected we need to +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index af9379a3bf89..cabcb66e7c5e 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -212,12 +212,13 @@ enum vmbus_connect_state { + + #define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT + +-struct vmbus_connection { +- /* +- * CPU on which the initial host contact was made. +- */ +- int connect_cpu; ++/* ++ * The CPU that Hyper-V will interrupt for VMBUS messages, such as ++ * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER. ++ */ ++#define VMBUS_CONNECT_CPU 0 + ++struct vmbus_connection { + u32 msg_conn_id; + + atomic_t offer_in_progress; +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 9cdd434bb340..160ff640485b 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -1092,14 +1092,28 @@ void vmbus_on_msg_dpc(unsigned long data) + /* + * If we are handling the rescind message; + * schedule the work on the global work queue. ++ * ++ * The OFFER message and the RESCIND message should ++ * not be handled by the same serialized work queue, ++ * because the OFFER handler may call vmbus_open(), ++ * which tries to open the channel by sending an ++ * OPEN_CHANNEL message to the host and waits for ++ * the host's response; however, if the host has ++ * rescinded the channel before it receives the ++ * OPEN_CHANNEL message, the host just silently ++ * ignores the OPEN_CHANNEL message; as a result, ++ * the guest's OFFER handler hangs for ever, if we ++ * handle the RESCIND message in the same serialized ++ * work queue: the RESCIND handler can not start to ++ * run before the OFFER handler finishes. + */ +- schedule_work_on(vmbus_connection.connect_cpu, ++ schedule_work_on(VMBUS_CONNECT_CPU, + &ctx->work); + break; + + case CHANNELMSG_OFFERCHANNEL: + atomic_inc(&vmbus_connection.offer_in_progress); +- queue_work_on(vmbus_connection.connect_cpu, ++ queue_work_on(VMBUS_CONNECT_CPU, + vmbus_connection.work_queue, + &ctx->work); + break; +@@ -1146,7 +1160,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel) + + INIT_WORK(&ctx->work, vmbus_onmessage_work); + +- queue_work_on(vmbus_connection.connect_cpu, ++ queue_work_on(VMBUS_CONNECT_CPU, + vmbus_connection.work_queue, + &ctx->work); + } +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c +index 5c1dddde193c..f96fd8efb45a 100644 +--- a/drivers/hwmon/k10temp.c ++++ b/drivers/hwmon/k10temp.c +@@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + {} +diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c +index 4150301a89a5..e8377ce0a95a 100644 +--- a/drivers/macintosh/windfarm_pm112.c ++++ b/drivers/macintosh/windfarm_pm112.c +@@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu) + s32 tmax; + int fmin; + +- /* Get PID params from the appropriate SAT */ +- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); +- if (hdr == NULL) { +- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); +- return -EINVAL; +- } +- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; +- + /* Get FVT params to get Tmax; if 
not found, assume default */ + hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); + if (hdr) { +@@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu) + if (tmax < cpu_all_tmax) + cpu_all_tmax = tmax; + ++ kfree(hdr); ++ ++ /* Get PID params from the appropriate SAT */ ++ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); ++ if (hdr == NULL) { ++ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); ++ return -EINVAL; ++ } ++ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; ++ + /* + * Darwin has a minimum fan speed of 1000 rpm for the 4-way and + * 515 for the 2-way. That appears to be overkill, so for now, +@@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu) + pid.min = fmin; + + wf_cpu_pid_init(&cpu_pid[cpu], &pid); ++ ++ kfree(hdr); ++ + return 0; + } + +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index 658b0f4a01f5..68901745eb20 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -789,7 +789,9 @@ static void bcache_device_free(struct bcache_device *d) + bcache_device_detach(d); + + if (disk) { +- if (disk->flags & GENHD_FL_UP) ++ bool disk_added = (disk->flags & GENHD_FL_UP) != 0; ++ ++ if (disk_added) + del_gendisk(disk); + + if (disk->queue) +@@ -797,7 +799,8 @@ static void bcache_device_free(struct bcache_device *d) + + ida_simple_remove(&bcache_device_idx, + first_minor_to_idx(disk->first_minor)); +- put_disk(disk); ++ if (disk_added) ++ put_disk(disk); + } + + bioset_exit(&d->bio_split); +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 492bbe0584d9..ffbda729e26e 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -2957,7 +2957,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) + limits->max_segment_size = PAGE_SIZE; + + limits->logical_block_size = +- max_t(unsigned short, limits->logical_block_size, cc->sector_size); ++ max_t(unsigned, limits->logical_block_size, cc->sector_size); + limits->physical_block_size = + max_t(unsigned, limits->physical_block_size, cc->sector_size); + limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 6b69a12ca2d8..5a378a453a2d 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -7607,7 +7607,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) + */ + mddev_put(mddev); + /* Wait until bdev->bd_disk is definitely gone */ +- flush_workqueue(md_misc_wq); ++ if (work_pending(&mddev->del_work)) ++ flush_workqueue(md_misc_wq); + /* Then retry the open from the top */ + return -ERESTARTSYS; + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 36cd7c2fbf40..a3cbc9f4fec1 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num) + * of the P and Q blocks. + */ + static int scribble_alloc(struct raid5_percpu *percpu, +- int num, int cnt, gfp_t flags) ++ int num, int cnt) + { + size_t obj_size = + sizeof(struct page *) * (num+2) + + sizeof(addr_conv_t) * (num+2); + void *scribble; + +- scribble = kvmalloc_array(cnt, obj_size, flags); ++ /* ++ * If here is in raid array suspend context, it is in memalloc noio ++ * context as well, there is no potential recursive memory reclaim ++ * I/Os with the GFP_KERNEL flag. 
++ */ ++ scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL); + if (!scribble) + return -ENOMEM; + +@@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) + + percpu = per_cpu_ptr(conf->percpu, cpu); + err = scribble_alloc(percpu, new_disks, +- new_sectors / STRIPE_SECTORS, +- GFP_NOIO); ++ new_sectors / STRIPE_SECTORS); + if (err) + break; + } +@@ -6765,8 +6769,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu + conf->previous_raid_disks), + max(conf->chunk_sectors, + conf->prev_chunk_sectors) +- / STRIPE_SECTORS, +- GFP_KERNEL)) { ++ / STRIPE_SECTORS)) { + free_scratch_buffer(conf, percpu); + return -ENOMEM; + } +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c +index b14c09cd9593..06383b26712b 100644 +--- a/drivers/media/cec/cec-adap.c ++++ b/drivers/media/cec/cec-adap.c +@@ -1732,6 +1732,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap, + unsigned j; + + log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; ++ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { ++ dprintk(1, "unknown logical address type\n"); ++ return -EINVAL; ++ } + if (type_mask & (1 << log_addrs->log_addr_type[i])) { + dprintk(1, "duplicate logical address type\n"); + return -EINVAL; +@@ -1752,10 +1756,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap, + dprintk(1, "invalid primary device type\n"); + return -EINVAL; + } +- if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { +- dprintk(1, "unknown logical address type\n"); +- return -EINVAL; +- } + for (j = 0; j < feature_sz; j++) { + if ((features[j] & 0x80) == 0) { + if (op_is_dev_features) +diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c +index 917fe034af37..032b6d7dd582 100644 +--- a/drivers/media/dvb-core/dvbdev.c ++++ b/drivers/media/dvb-core/dvbdev.c +@@ -707,9 +707,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap, + } + + if (ntuner && ndemod) { +- pad_source = media_get_pad_index(tuner, true, ++ /* NOTE: first found tuner source pad presumed correct */ ++ pad_source = media_get_pad_index(tuner, false, + PAD_SIGNAL_ANALOG); +- if (pad_source) ++ if (pad_source < 0) + return -EINVAL; + ret = media_create_pad_links(mdev, + MEDIA_ENT_F_TUNER, +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c +index a398ea81e422..266e947572c1 100644 +--- a/drivers/media/i2c/ov5640.c ++++ b/drivers/media/i2c/ov5640.c +@@ -3068,8 +3068,8 @@ static int ov5640_probe(struct i2c_client *client) + free_ctrls: + v4l2_ctrl_handler_free(&sensor->ctrls.handler); + entity_cleanup: +- mutex_destroy(&sensor->lock); + media_entity_cleanup(&sensor->sd.entity); ++ mutex_destroy(&sensor->lock); + return ret; + } + +@@ -3079,9 +3079,9 @@ static int ov5640_remove(struct i2c_client *client) + struct ov5640_dev *sensor = to_ov5640_dev(sd); + + v4l2_async_unregister_subdev(&sensor->sd); +- mutex_destroy(&sensor->lock); + media_entity_cleanup(&sensor->sd.entity); + v4l2_ctrl_handler_free(&sensor->ctrls.handler); ++ mutex_destroy(&sensor->lock); + + return 0; + } +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index 43c78620c9d8..5c6b00737fe7 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -8,6 +8,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -21,6 +22,7 @@ + struct rcar_fcp_device { + struct list_head list; + struct device *dev; ++ struct device_dma_parameters dma_parms; + }; + + static 
LIST_HEAD(fcp_devices); +@@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) + + fcp->dev = &pdev->dev; + ++ fcp->dev->dma_parms = &fcp->dma_parms; ++ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); ++ + pm_runtime_enable(&pdev->dev); + + mutex_lock(&fcp_lock); +diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c +index 82350097503e..84ec36156f73 100644 +--- a/drivers/media/platform/vicodec/vicodec-core.c ++++ b/drivers/media/platform/vicodec/vicodec-core.c +@@ -2172,16 +2172,19 @@ static int vicodec_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, dev); + +- if (register_instance(dev, &dev->stateful_enc, +- "stateful-encoder", true)) ++ ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder", ++ true); ++ if (ret) + goto unreg_dev; + +- if (register_instance(dev, &dev->stateful_dec, +- "stateful-decoder", false)) ++ ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder", ++ false); ++ if (ret) + goto unreg_sf_enc; + +- if (register_instance(dev, &dev->stateless_dec, +- "stateless-decoder", false)) ++ ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder", ++ false); ++ if (ret) + goto unreg_sf_dec; + + #ifdef CONFIG_MEDIA_CONTROLLER +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c +index e87040d6eca7..a39e1966816b 100644 +--- a/drivers/media/tuners/si2157.c ++++ b/drivers/media/tuners/si2157.c +@@ -75,24 +75,23 @@ static int si2157_init(struct dvb_frontend *fe) + struct si2157_cmd cmd; + const struct firmware *fw; + const char *fw_name; +- unsigned int uitmp, chip_id; ++ unsigned int chip_id, xtal_trim; + + dev_dbg(&client->dev, "\n"); + +- /* Returned IF frequency is garbage when firmware is not running */ +- memcpy(cmd.args, "\x15\x00\x06\x07", 4); ++ /* Try to get Xtal trim property, to verify tuner still running */ ++ memcpy(cmd.args, "\x15\x00\x04\x02", 4); + cmd.wlen = 4; + cmd.rlen = 4; + ret = si2157_cmd_execute(client, &cmd); +- if (ret) +- goto err; + +- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; +- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); ++ xtal_trim = cmd.args[2] | (cmd.args[3] << 8); + +- if (uitmp == dev->if_frequency / 1000) ++ if (ret == 0 && xtal_trim < 16) + goto warm; + ++ dev->if_frequency = 0; /* we no longer know current tuner state */ ++ + /* power up */ + if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { + memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); +diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c +index d4ea72bf09c5..5131c8d4c632 100644 +--- a/drivers/media/usb/dvb-usb/dibusb-mb.c ++++ b/drivers/media/usb/dvb-usb/dibusb-mb.c +@@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) + + if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { + err("tuner i2c write failed."); +- ret = -EREMOTEIO; ++ return -EREMOTEIO; + } + + if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) +diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c +index b05fa227ffb2..95756cbb722f 100644 +--- a/drivers/media/usb/go7007/snd-go7007.c ++++ b/drivers/media/usb/go7007/snd-go7007.c +@@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go) + gosnd->capturing = 0; + ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, + &gosnd->card); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_snd; ++ + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, 
go, + &go7007_snd_device_ops); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); + strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname)); + strscpy(gosnd->card->longname, gosnd->card->shortname, +@@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go) + &go7007_snd_capture_ops); + + ret = snd_card_register(gosnd->card); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; + + gosnd->substream = NULL; + go->snd_context = gosnd; +@@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go) + ++dev; + + return 0; ++ ++free_card: ++ snd_card_free(gosnd->card); ++free_snd: ++ kfree(gosnd); ++ return ret; + } + EXPORT_SYMBOL(go7007_snd_init); + +diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c +index 999214e8cf2b..360d523132bd 100644 +--- a/drivers/mmc/host/meson-mx-sdio.c ++++ b/drivers/mmc/host/meson-mx-sdio.c +@@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) + + mrq = host->mrq; + ++ if (host->cmd->error) ++ meson_mx_mmc_soft_reset(host); ++ + host->mrq = NULL; + host->cmd = NULL; + +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index dccb4df46512..b03d65222622 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -87,7 +87,7 @@ + #define ESDHC_STD_TUNING_EN (1 << 24) + /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ + #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +-#define ESDHC_TUNING_START_TAP_MASK 0xff ++#define ESDHC_TUNING_START_TAP_MASK 0x7f + #define ESDHC_TUNING_STEP_MASK 0x00070000 + #define ESDHC_TUNING_STEP_SHIFT 16 + +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 8b2a6a362c60..84cffdef264b 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -1742,7 +1742,9 @@ static const struct sdhci_ops sdhci_msm_ops = { + static const struct sdhci_pltfm_data sdhci_msm_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_SINGLE_POWER_WRITE | +- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ++ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, ++ + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_msm_ops, + }; +diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c +index f4ac064ff471..8d96ecba1b55 100644 +--- a/drivers/mmc/host/via-sdmmc.c ++++ b/drivers/mmc/host/via-sdmmc.c +@@ -319,6 +319,8 @@ struct via_crdr_mmc_host { + /* some devices need a very long delay for power to stabilize */ + #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 + ++#define VIA_CMD_TIMEOUT_MS 1000 ++ + static const struct pci_device_id via_ids[] = { + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, +@@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host, + { + void __iomem *addrbase; + struct mmc_data *data; ++ unsigned int timeout_ms; + u32 cmdctrl = 0; + + WARN_ON(host->cmd); + + data = cmd->data; +- mod_timer(&host->timer, jiffies + HZ); + host->cmd = cmd; + ++ timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; ++ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); ++ + /*Command index*/ + cmdctrl = cmd->opcode << 8; + +diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c +index 15ef30b368a5..4fef20724175 100644 +--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c ++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c +@@ -1019,11 +1019,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, + if (!section) { + /* + * Small-page NAND use byte 6 for BBI while large-page +- * NAND use byte 0. ++ * NAND use bytes 0 and 1. + */ +- if (cfg->page_size > 512) +- oobregion->offset++; +- oobregion->length--; ++ if (cfg->page_size > 512) { ++ oobregion->offset += 2; ++ oobregion->length -= 2; ++ } else { ++ oobregion->length--; ++ } + } + } + +diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c +index c0e1a8ebe820..522390b99d3c 100644 +--- a/drivers/mtd/nand/raw/diskonchip.c ++++ b/drivers/mtd/nand/raw/diskonchip.c +@@ -1609,13 +1609,10 @@ static int __init doc_probe(unsigned long physadr) + numchips = doc2001_init(mtd); + + if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { +- /* DBB note: i believe nand_release is necessary here, as ++ /* DBB note: i believe nand_cleanup is necessary here, as + buffers may have been allocated in nand_base. Check with + Thomas. FIX ME! */ +- /* nand_release will call mtd_device_unregister, but we +- haven't yet added it. This is handled without incident by +- mtd_device_unregister, as far as I can tell. */ +- nand_release(nand); ++ nand_cleanup(nand); + goto fail; + } + +diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +index 49afebee50db..4b7c399d4f4b 100644 +--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c ++++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +@@ -376,7 +376,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { +- nand_release(chip); ++ nand_cleanup(chip); + return ret; + } + +diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c +index 373d47d1ba4c..08008c844a47 100644 +--- a/drivers/mtd/nand/raw/mtk_nand.c ++++ b/drivers/mtd/nand/raw/mtk_nand.c +@@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "mtd parse partition error\n"); +- nand_release(nand); ++ nand_cleanup(nand); + return ret; + } + +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index 47c63968fa45..db66c1be6e5f 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -731,8 +731,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy); + int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, + unsigned long timeout_ms) + { +- /* Wait until R/B pin indicates chip is ready or timeout occurs */ +- timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); ++ ++ /* ++ * Wait until R/B pin indicates chip is ready or timeout occurs. ++ * +1 below is necessary because if we are now in the last fraction ++ * of jiffy and msecs_to_jiffies is 1 then we will wait only that ++ * small jiffy fraction - possibly leading to false timeout. 
++ */ ++ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; + do { + if (gpiod_get_value_cansleep(gpiod)) + return 0; +diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c +index 0b879bd0a68c..8fe8d7bdd203 100644 +--- a/drivers/mtd/nand/raw/nand_onfi.c ++++ b/drivers/mtd/nand/raw/nand_onfi.c +@@ -173,7 +173,7 @@ int nand_onfi_detect(struct nand_chip *chip) + } + + if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == +- le16_to_cpu(p->crc)) { ++ le16_to_cpu(p[i].crc)) { + if (i) + memcpy(p, &p[i], sizeof(*p)); + break; +diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c +index d27b39a7223c..a3dcdf25f5f2 100644 +--- a/drivers/mtd/nand/raw/orion_nand.c ++++ b/drivers/mtd/nand/raw/orion_nand.c +@@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) + mtd->name = "orion_nand"; + ret = mtd_device_register(mtd, board->parts, board->nr_parts); + if (ret) { +- nand_release(nc); ++ nand_cleanup(nc); + goto no_dev; + } + +diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c +index c43cb4d92d3d..0429d218fd9f 100644 +--- a/drivers/mtd/nand/raw/oxnas_nand.c ++++ b/drivers/mtd/nand/raw/oxnas_nand.c +@@ -140,10 +140,8 @@ static int oxnas_nand_probe(struct platform_device *pdev) + goto err_release_child; + + err = mtd_device_register(mtd, NULL, 0); +- if (err) { +- nand_release(chip); +- goto err_release_child; +- } ++ if (err) ++ goto err_cleanup_nand; + + oxnas->chips[nchips] = chip; + ++nchips; +@@ -159,6 +157,8 @@ static int oxnas_nand_probe(struct platform_device *pdev) + + return 0; + ++err_cleanup_nand: ++ nand_cleanup(chip); + err_release_child: + of_node_put(nand_np); + err_clk_unprepare: +diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c +index 9cfe7395172a..066ff6dc9a23 100644 +--- a/drivers/mtd/nand/raw/pasemi_nand.c ++++ b/drivers/mtd/nand/raw/pasemi_nand.c +@@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { + dev_err(dev, "Unable to register MTD device\n"); + err = -ENODEV; +- goto out_lpc; ++ goto out_cleanup_nand; + } + + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, +@@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + + return 0; + ++ out_cleanup_nand: ++ nand_cleanup(chip); + out_lpc: + release_region(lpcctl, 4); + out_ior: +diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c +index dc0f3074ddbf..3a495b233443 100644 +--- a/drivers/mtd/nand/raw/plat_nand.c ++++ b/drivers/mtd/nand/raw/plat_nand.c +@@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev) + if (!err) + return err; + +- nand_release(&data->chip); ++ nand_cleanup(&data->chip); + out: + if (pdata->ctrl.remove) + pdata->ctrl.remove(pdev); +diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c +index b47a9eaff89b..d8c52a016080 100644 +--- a/drivers/mtd/nand/raw/sharpsl.c ++++ b/drivers/mtd/nand/raw/sharpsl.c +@@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) + return 0; + + err_add: +- nand_release(this); ++ nand_cleanup(this); + + err_scan: + iounmap(sharpsl->io); +diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c +index 20f40c0e812c..7c94fc51a611 100644 +--- a/drivers/mtd/nand/raw/socrates_nand.c ++++ b/drivers/mtd/nand/raw/socrates_nand.c +@@ -169,7 +169,7 @@ static int socrates_nand_probe(struct 
platform_device *ofdev) + if (!res) + return res; + +- nand_release(nand_chip); ++ nand_cleanup(nand_chip); + + out: + iounmap(host->io_base); +diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c +index 89773293c64d..45c376fc571a 100644 +--- a/drivers/mtd/nand/raw/sunxi_nand.c ++++ b/drivers/mtd/nand/raw/sunxi_nand.c +@@ -2003,7 +2003,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); +- nand_release(nand); ++ nand_cleanup(nand); + return ret; + } + +diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c +index db030f1701ee..4e9a6d94f6e8 100644 +--- a/drivers/mtd/nand/raw/tmio_nand.c ++++ b/drivers/mtd/nand/raw/tmio_nand.c +@@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) + if (!retval) + return retval; + +- nand_release(nand_chip); ++ nand_cleanup(nand_chip); + + err_irq: + tmio_hw_stop(dev, tmio); +diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c +index 834f794816a9..018311dc8fe1 100644 +--- a/drivers/mtd/nand/raw/xway_nand.c ++++ b/drivers/mtd/nand/raw/xway_nand.c +@@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev) + + err = mtd_device_register(mtd, NULL, 0); + if (err) +- nand_release(&data->chip); ++ nand_cleanup(&data->chip); + + return err; + } +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c +index 0537df06a9b5..ff318472a3ee 100644 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -432,7 +432,7 @@ static void emac_timeout(struct net_device *dev) + /* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; +@@ -440,7 +440,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + + channel = db->tx_fifo_stat & 3; + if (channel == 3) +- return 1; ++ return NETDEV_TX_BUSY; + + channel = (channel == 1 ? 
1 : 0); + +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index 48de4bee209e..9225733f4fec 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -2349,6 +2349,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + rss->hash_key; + int rc; + ++ if (unlikely(!func)) ++ return -EINVAL; ++ + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, +@@ -2361,8 +2364,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + if (rss->hash_func) + rss->hash_func--; + +- if (func) +- *func = rss->hash_func; ++ *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +index 12949f1ec1ea..145334fb18f4 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +@@ -690,6 +690,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) + u32 *regs_buff = p; + int err = 0; + ++ if (unlikely(!self->aq_hw_ops->hw_get_regs)) ++ return -EOPNOTSUPP; ++ + regs->version = 1; + + err = self->aq_hw_ops->hw_get_regs(self->aq_hw, +@@ -704,6 +707,9 @@ err_exit: + + int aq_nic_get_regs_count(struct aq_nic_s *self) + { ++ if (unlikely(!self->aq_hw_ops->hw_get_regs)) ++ return 0; ++ + return self->aq_nic_cfg.aq_hw_caps->mac_regs_count; + } + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 6f01f4e03cef..3d3b1005d076 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -69,6 +69,9 @@ + #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + ++/* Forward declarations */ ++static void bcmgenet_set_rx_mode(struct net_device *dev); ++ + static inline void bcmgenet_writel(u32 value, void __iomem *offset) + { + /* MIPS chips strapped for BE will automagically configure the +@@ -2852,6 +2855,7 @@ static void bcmgenet_netif_start(struct net_device *dev) + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ ++ bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h +index dbc69d8fa05f..5b7c2f9241d0 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + /* total number of Buffer Descriptors, same for Rx/Tx */ + #define TOTAL_DESC 256 +@@ -674,6 +675,7 @@ struct bcmgenet_priv { + /* WOL */ + struct clk *clk_wol; + u32 wolopts; ++ u8 sopass[SOPASS_MAX]; + + struct bcmgenet_mib_counters mib; + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +index ea20d94bd050..a41f82379369 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +@@ -41,18 +41,13 @@ + void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct bcmgenet_priv *priv = netdev_priv(dev); +- u32 reg; + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, 
sizeof(wol->sopass)); + +- if (wol->wolopts & WAKE_MAGICSECURE) { +- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS); +- put_unaligned_be16(reg, &wol->sopass[0]); +- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS); +- put_unaligned_be32(reg, &wol->sopass[2]); +- } ++ if (wol->wolopts & WAKE_MAGICSECURE) ++ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); + } + + /* ethtool function - set WOL (Wake on LAN) settings. +@@ -62,7 +57,6 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; +- u32 reg; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; +@@ -70,17 +64,8 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) + return -EINVAL; + +- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); +- if (wol->wolopts & WAKE_MAGICSECURE) { +- bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), +- UMAC_MPD_PW_MS); +- bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), +- UMAC_MPD_PW_LS); +- reg |= MPD_PW_EN; +- } else { +- reg &= ~MPD_PW_EN; +- } +- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); ++ if (wol->wolopts & WAKE_MAGICSECURE) ++ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { +@@ -120,6 +105,14 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) + return retries; + } + ++static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) ++{ ++ bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), ++ UMAC_MPD_PW_MS); ++ bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), ++ UMAC_MPD_PW_LS); ++} ++ + int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) + { +@@ -140,13 +133,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; ++ if (priv->wolopts & WAKE_MAGICSECURE) { ++ bcmgenet_set_mpd_password(priv); ++ reg |= MPD_PW_EN; ++ } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); +- reg &= ~MPD_EN; ++ reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + return retries; + } +@@ -185,7 +182,7 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already powered up so skip the rest */ +- reg &= ~MPD_EN; ++ reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Disable CRC Forward */ +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +index a935b20effa3..3177dd8ede8e 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +@@ -1981,7 +1981,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev, + int i; + + if (type != TC_SETUP_QDISC_MQPRIO) +- return -EINVAL; ++ return -EOPNOTSUPP; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_queues = dpaa2_eth_queue_count(priv); +@@ -1993,7 +1993,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev, + if (num_tc > dpaa2_eth_tc_count(priv)) { + netdev_err(net_dev, "Max %d traffic classes supported\n", + dpaa2_eth_tc_count(priv)); +- return 
-EINVAL; ++ return -EOPNOTSUPP; + } + + if (!num_tc) { +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c +index 86493fea56e4..f93ed70709c6 100644 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c +@@ -3140,8 +3140,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { ++ case e1000_82544: { + unsigned int pull_size; +- case e1000_82544: ++ + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements +@@ -3162,6 +3163,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + } + len = skb_headlen(skb); + break; ++ } + default: + /* do nothing */ + break; +diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h +index 37a2314d3e6b..944abd5eae11 100644 +--- a/drivers/net/ethernet/intel/e1000e/e1000.h ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h +@@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) + + #define er32(reg) __er32(hw, E1000_##reg) + +-s32 __ew32_prepare(struct e1000_hw *hw); + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + + #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index 8c4507838325..108297a099ed 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +-s32 __ew32_prepare(struct e1000_hw *hw) ++static void __ew32_prepare(struct e1000_hw *hw) + { + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); +- +- return i; + } + + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +@@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) + { + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, rx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { ++ if (unlikely(i != readl(rx_ring->tail))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); +@@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) + { + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, tx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { ++ if (unlikely(i != readl(tx_ring->tail))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); +@@ -5289,6 +5287,10 @@ static void e1000_watchdog_task(struct work_struct *work) + /* oops */ + break; + } ++ if (hw->mac.type == e1000_pch_spt) { ++ netdev->features &= ~NETIF_F_TSO; ++ netdev->features &= ~NETIF_F_TSO6; ++ } + } + + /* enable transmits in the hardware, need to do this +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c +index 171f0b625407..d68b8aa31b19 100644 +--- a/drivers/net/ethernet/intel/ice/ice_common.c ++++ b/drivers/net/ethernet/intel/ice/ice_common.c +@@ -436,6 +436,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) + static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) + { + struct ice_switch_info *sw; ++ enum ice_status status; + + hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*hw->switch_info), GFP_KERNEL); +@@ -446,7 +447,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) + + INIT_LIST_HEAD(&sw->vsi_list_map_head); + +- return ice_init_def_sw_recp(hw); ++ status = ice_init_def_sw_recp(hw); ++ if (status) { ++ devm_kfree(ice_hw_to_dev(hw), hw->switch_info); ++ return status; ++ } ++ return 0; + } + + /** +diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c +index c68709c7ef81..2e9c97bad3c3 100644 +--- a/drivers/net/ethernet/intel/ice/ice_controlq.c ++++ b/drivers/net/ethernet/intel/ice/ice_controlq.c +@@ -199,7 +199,9 @@ unwind_alloc_rq_bufs: + cq->rq.r.rq_bi[i].pa = 0; + cq->rq.r.rq_bi[i].size = 0; + } ++ cq->rq.r.rq_bi = NULL; + devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); ++ cq->rq.dma_head = NULL; + + return ICE_ERR_NO_MEMORY; + } +@@ -245,7 +247,9 @@ unwind_alloc_sq_bufs: + cq->sq.r.sq_bi[i].pa = 0; + cq->sq.r.sq_bi[i].size = 0; + } ++ cq->sq.r.sq_bi = NULL; + devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); ++ cq->sq.dma_head = NULL; + + return ICE_ERR_NO_MEMORY; + } +@@ -304,6 +308,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) + return 0; + } + ++#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ ++do { \ ++ int i; \ ++ /* free descriptors */ \ ++ if ((qi)->ring.r.ring##_bi) \ ++ for (i = 0; i < (qi)->num_##ring##_entries; i++) \ ++ if ((qi)->ring.r.ring##_bi[i].pa) { \ ++ dmam_free_coherent(ice_hw_to_dev(hw), \ ++ (qi)->ring.r.ring##_bi[i].size, \ ++ 
(qi)->ring.r.ring##_bi[i].va, \ ++ (qi)->ring.r.ring##_bi[i].pa); \ ++ (qi)->ring.r.ring##_bi[i].va = NULL;\ ++ (qi)->ring.r.ring##_bi[i].pa = 0;\ ++ (qi)->ring.r.ring##_bi[i].size = 0;\ ++ } \ ++ /* free the buffer info list */ \ ++ if ((qi)->ring.cmd_buf) \ ++ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ ++ /* free DMA head */ \ ++ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ ++} while (0) ++ + /** + * ice_init_sq - main initialization routine for Control ATQ + * @hw: pointer to the hardware structure +@@ -357,6 +383,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + goto init_ctrlq_exit; + + init_ctrlq_free_rings: ++ ICE_FREE_CQ_BUFS(hw, cq, sq); + ice_free_cq_ring(hw, &cq->sq); + + init_ctrlq_exit: +@@ -416,33 +443,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + goto init_ctrlq_exit; + + init_ctrlq_free_rings: ++ ICE_FREE_CQ_BUFS(hw, cq, rq); + ice_free_cq_ring(hw, &cq->rq); + + init_ctrlq_exit: + return ret_code; + } + +-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +-do { \ +- int i; \ +- /* free descriptors */ \ +- for (i = 0; i < (qi)->num_##ring##_entries; i++) \ +- if ((qi)->ring.r.ring##_bi[i].pa) { \ +- dmam_free_coherent(ice_hw_to_dev(hw), \ +- (qi)->ring.r.ring##_bi[i].size,\ +- (qi)->ring.r.ring##_bi[i].va,\ +- (qi)->ring.r.ring##_bi[i].pa);\ +- (qi)->ring.r.ring##_bi[i].va = NULL; \ +- (qi)->ring.r.ring##_bi[i].pa = 0; \ +- (qi)->ring.r.ring##_bi[i].size = 0; \ +- } \ +- /* free the buffer info list */ \ +- if ((qi)->ring.cmd_buf) \ +- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ +- /* free DMA head */ \ +- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ +-} while (0) +- + /** + * ice_shutdown_sq - shutdown the Control ATQ + * @hw: pointer to the hardware structure +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 2408f0de95fc..d0ccb7ad447b 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -2900,7 +2900,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) + if (err) { + dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); + err = -EIO; +- goto err_init_interrupt_unroll; ++ goto err_init_vsi_unroll; + } + + /* Driver is mostly up */ +@@ -2986,6 +2986,7 @@ err_msix_misc_unroll: + ice_free_irq_msix_misc(pf); + err_init_interrupt_unroll: + ice_clear_interrupt_scheme(pf); ++err_init_vsi_unroll: + devm_kfree(dev, pf->vsi); + err_init_pf_unroll: + ice_deinit_pf(pf); +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index 8959418776f6..f80933320fd3 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, + u32 speed; + u32 supported, advertising; + +- status = rd32(E1000_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? 
++ 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 0bd1294ba517..39c5e6fdb72c 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Configure pause time (2 TCs per register) */ +- reg = hw->fc.pause_time * 0x00010001; ++ reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index a26f9fb95ac0..edaa0bffa5c3 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -2254,7 +2254,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, + rx_buffer->page_offset ^= truesize; + #else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? +- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : ++ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index c4eed5bbcd45..066bada4ccd1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1428,6 +1428,7 @@ out: + + #ifdef CONFIG_MLX5_CORE_IPOIB + ++#define MLX5_IB_GRH_SGID_OFFSET 8 + #define MLX5_IB_GRH_DGID_OFFSET 24 + #define MLX5_GID_SIZE 16 + +@@ -1441,6 +1442,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + struct net_device *netdev; + struct mlx5e_priv *priv; + char *pseudo_header; ++ u32 flags_rqpn; + u32 qpn; + u8 *dgid; + u8 g; +@@ -1462,7 +1464,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + tstamp = &priv->tstamp; + stats = &priv->channel_stats[rq->ix].rq; + +- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; ++ flags_rqpn = be32_to_cpu(cqe->flags_rqpn); ++ g = (flags_rqpn >> 28) & 3; + dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; + if ((!g) || dgid[0] != 0xff) + skb->pkt_type = PACKET_HOST; +@@ -1471,9 +1474,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + else + skb->pkt_type = PACKET_MULTICAST; + +- /* TODO: IB/ipoib: Allow mcast packets from other VFs +- * 68996a6e760e5c74654723eeb57bf65628ae87f4 ++ /* Drop packets that this interface sent, ie multicast packets ++ * that the HCA has replicated. 
+ */ ++ if (g && (qpn == (flags_rqpn & 0xffffff)) && ++ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET, ++ MLX5_GID_SIZE) == 0)) { ++ skb->dev = NULL; ++ return; ++ } + + skb_pull(skb, MLX5_IB_GRH_BYTES); + +diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c +index 544012a67221..1d59ef367a85 100644 +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -815,7 +815,8 @@ static int lpc_mii_init(struct netdata_local *pldat) + if (mdiobus_register(pldat->mii_bus)) + goto err_out_unregister_bus; + +- if (lpc_mii_probe(pldat->ndev) != 0) ++ err = lpc_mii_probe(pldat->ndev); ++ if (err) + goto err_out_unregister_bus; + + return 0; +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h +index 1f27f9866b80..61b5aa3e5b98 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede.h ++++ b/drivers/net/ethernet/qlogic/qede/qede.h +@@ -574,12 +574,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, + #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) + #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) + #define NUM_RX_BDS_MIN 128 ++#define NUM_RX_BDS_KDUMP_MIN 63 + #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) + + #define TX_RING_SIZE_POW 13 + #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) + #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) + #define NUM_TX_BDS_MIN 128 ++#define NUM_TX_BDS_KDUMP_MIN 63 + #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX + + #define QEDE_MIN_PKT_LEN 64 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c +index ba53612ae0df..1da6b5bda80a 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c +@@ -29,6 +29,7 @@ + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ ++#include + #include + #include + #include +@@ -707,8 +708,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, + edev->dp_module = dp_module; + edev->dp_level = dp_level; + edev->ops = qed_ops; +- edev->q_num_rx_buffers = NUM_RX_BDS_DEF; +- edev->q_num_tx_buffers = NUM_TX_BDS_DEF; ++ ++ if (is_kdump_kernel()) { ++ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; ++ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; ++ } else { ++ edev->q_num_rx_buffers = NUM_RX_BDS_DEF; ++ edev->q_num_tx_buffers = NUM_TX_BDS_DEF; ++ } + + DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", + info->num_queues, info->num_queues); +diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c +index 38b7f6d35759..702fdc393da0 100644 +--- a/drivers/net/ethernet/ti/davinci_mdio.c ++++ b/drivers/net/ethernet/ti/davinci_mdio.c +@@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) + data->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -EINVAL; + data->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!data->regs) + return -ENOMEM; +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 0ce1004a8d0d..9d3209ae41cf 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -447,6 +447,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + int ret; + rx_handler_result_t handle_res; + ++ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ ++ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) ++ return RX_HANDLER_PASS; ++ + port = macvlan_port_get_rcu(skb->dev); + if (is_multicast_ether_addr(eth->h_dest)) { + unsigned int hash; +diff --git a/drivers/net/veth.c b/drivers/net/veth.c +index 9f3c839f9e5f..88cfd63f08a6 100644 +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -510,13 +510,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, + struct veth_xdp_tx_bq *bq) + { + void *hard_start = frame->data - frame->headroom; +- void *head = hard_start - sizeof(struct xdp_frame); + int len = frame->len, delta = 0; + struct xdp_frame orig_frame; + struct bpf_prog *xdp_prog; + unsigned int headroom; + struct sk_buff *skb; + ++ /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */ ++ hard_start -= sizeof(struct xdp_frame); ++ + rcu_read_lock(); + xdp_prog = rcu_dereference(rq->xdp_prog); + if (likely(xdp_prog)) { +@@ -538,7 +540,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, + break; + case XDP_TX: + orig_frame = *frame; +- xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) { + trace_xdp_exception(rq->dev, xdp_prog, act); +@@ -550,7 +551,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, + goto xdp_xmit; + case XDP_REDIRECT: + orig_frame = *frame; +- xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { + frame = &orig_frame; +@@ -572,7 +572,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, + rcu_read_unlock(); + + headroom = sizeof(struct xdp_frame) + frame->headroom - delta; +- skb = veth_build_skb(head, headroom, len, 0); ++ skb = veth_build_skb(hard_start, headroom, len, 0); + if (!skb) { + xdp_return_frame(frame); + goto err; +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index 0a38c76688ab..5e2571d23ab9 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ 
b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -702,6 +702,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; ++ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) ++ return 0; + while (n--) + p[n] = rssConf->indTable[n]; + return 0; +diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h +index 30c080094af1..bd5fa4dbab9c 100644 +--- a/drivers/net/wireless/ath/ath10k/htt.h ++++ b/drivers/net/wireless/ath/ath10k/htt.h +@@ -2033,6 +2033,7 @@ struct ath10k_htt_tx_ops { + int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu); ++ void (*htt_flush_tx)(struct ath10k_htt *htt); + }; + + static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) +@@ -2072,6 +2073,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt, + return htt->tx_ops->htt_tx(htt, txmode, msdu); + } + ++static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) ++{ ++ if (htt->tx_ops->htt_flush_tx) ++ htt->tx_ops->htt_flush_tx(htt); ++} ++ + static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) + { + if (!htt->tx_ops->htt_alloc_txbuff) +diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c +index a182c0944cc7..735482877a1f 100644 +--- a/drivers/net/wireless/ath/ath10k/htt_tx.c ++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c +@@ -529,9 +529,14 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) + htt->tx_mem_allocated = false; + } + +-void ath10k_htt_tx_stop(struct ath10k_htt *htt) ++static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt) + { + idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); ++} ++ ++void ath10k_htt_tx_stop(struct ath10k_htt *htt) ++{ ++ ath10k_htt_flush_tx_queue(htt); + idr_destroy(&htt->pending_tx); + } + +@@ -1774,6 +1779,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_tx = ath10k_htt_tx_hl, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, ++ .htt_flush_tx = ath10k_htt_flush_tx_queue, + }; + + void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 36d24ea126a2..d373602a8014 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -3911,6 +3911,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) + if (ret) { + ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", + ret); ++ /* remove this msdu from idr tracking */ ++ ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); ++ + dma_unmap_single(ar->dev, paddr, skb->len, + DMA_TO_DEVICE); + ieee80211_free_txskb(ar->hw, skb); +@@ -7082,6 +7085,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + ath10k_wmi_peer_flush(ar, arvif->vdev_id, + arvif->bssid, bitmap); + } ++ ath10k_htt_flush_tx(&ar->htt); + } + return; + } +@@ -8811,7 +8815,6 @@ int ath10k_mac_register(struct ath10k *ar) + ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; + + if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { +- ar->hw->wiphy->max_sched_scan_reqs = 1; + ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c +index 
0a727502d14c..fd49d3419e79 100644 +--- a/drivers/net/wireless/ath/ath10k/pci.c ++++ b/drivers/net/wireless/ath/ath10k/pci.c +@@ -2074,6 +2074,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) + ath10k_pci_irq_sync(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); ++ cancel_work_sync(&ar_pci->dump_work); + + /* Most likely the device has HTT Rx ring configured. The only way to + * prevent the device from accessing (and possible corrupting) host +diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c +index 39abf8b12903..f46b9083bbf1 100644 +--- a/drivers/net/wireless/ath/ath10k/txrx.c ++++ b/drivers/net/wireless/ath/ath10k/txrx.c +@@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + wake_up(&htt->empty_tx_wq); + spin_unlock_bh(&htt->tx_lock); + ++ rcu_read_lock(); + if (txq && txq->sta && skb_cb->airtime_est) + ieee80211_sta_register_airtime(txq->sta, txq->tid, + skb_cb->airtime_est, 0); ++ rcu_read_unlock(); + + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h +index 1491c25518bb..edccabc667e8 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h ++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h +@@ -133,6 +133,7 @@ struct wmi_ops { + struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, + struct sk_buff *skb, + dma_addr_t paddr); ++ int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); + struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, + u32 log_level); + struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); +@@ -441,6 +442,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) + return ar->wmi.ops->get_txbf_conf_scheme(ar); + } + ++static inline int ++ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) ++{ ++ if (!ar->wmi.ops->cleanup_mgmt_tx_send) ++ return -EOPNOTSUPP; ++ ++ return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); ++} ++ + static inline int + ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c +index eb0c963d9fd5..9d5b9df29c35 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c +@@ -2837,6 +2837,18 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) + return skb; + } + ++static int ++ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, ++ struct sk_buff *msdu) ++{ ++ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); ++ struct ath10k_wmi *wmi = &ar->wmi; ++ ++ idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); ++ ++ return 0; ++} ++ + static int + ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, + dma_addr_t paddr) +@@ -2911,6 +2923,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + if (desc_id < 0) + goto err_free_skb; + ++ cb->msdu_id = desc_id; ++ + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); +@@ -4339,6 +4353,7 @@ static const struct wmi_ops wmi_tlv_ops = { + .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, + /* .gen_mgmt_tx = not implemented; HTT is used */ + .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, ++ .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, + .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, + 
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, +diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c +index 51934d191f33..1ab09e1c9ec5 100644 +--- a/drivers/net/wireless/ath/carl9170/fw.c ++++ b/drivers/net/wireless/ath/carl9170/fw.c +@@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + + if (SUPP(CARL9170FW_WLANTX_CAB)) { +- if_comb_types |= +- BIT(NL80211_IFTYPE_AP) | +- BIT(NL80211_IFTYPE_P2P_GO); ++ if_comb_types |= BIT(NL80211_IFTYPE_AP); + + #ifdef CONFIG_MAC80211_MESH + if_comb_types |= +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c +index 40a8054f8aa6..21ca62b06214 100644 +--- a/drivers/net/wireless/ath/carl9170/main.c ++++ b/drivers/net/wireless/ath/carl9170/main.c +@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, + ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && + (vif->type != NL80211_IFTYPE_AP)); + +- /* While the driver supports HW offload in a single +- * P2P client configuration, it doesn't support HW +- * offload in the favourit, concurrent P2P GO+CLIENT +- * configuration. Hence, HW offload will always be +- * disabled for P2P. ++ /* The driver used to have P2P GO+CLIENT support, ++ * but since this was dropped and we don't know if ++ * there are any gremlins lurking in the shadows, ++ * so best we keep HW offload disabled for P2P. + */ + ar->disable_offload |= vif->p2p; + +@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, + if (vif->type == NL80211_IFTYPE_STATION) + break; + +- /* P2P GO [master] use-case +- * Because the P2P GO station is selected dynamically +- * by all participating peers of a WIFI Direct network, +- * the driver has be able to change the main interface +- * operating mode on the fly. 
+- */ +- if (main_vif->p2p && vif->p2p && +- vif->type == NL80211_IFTYPE_AP) { +- old_main = main_vif; +- break; +- } +- + err = -EBUSY; + rcu_read_unlock(); + +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c +index 79998a3ddb7a..ad051f34e65b 100644 +--- a/drivers/net/wireless/ath/wcn36xx/main.c ++++ b/drivers/net/wireless/ath/wcn36xx/main.c +@@ -1341,7 +1341,7 @@ static int wcn36xx_probe(struct platform_device *pdev) + if (addr && ret != ETH_ALEN) { + wcn36xx_err("invalid local-mac-address\n"); + ret = -EINVAL; +- goto out_wq; ++ goto out_destroy_ept; + } else if (addr) { + wcn36xx_info("mac address: %pM\n", addr); + SET_IEEE80211_PERM_ADDR(wcn->hw, addr); +@@ -1349,7 +1349,7 @@ static int wcn36xx_probe(struct platform_device *pdev) + + ret = wcn36xx_platform_get_resources(wcn, pdev); + if (ret) +- goto out_wq; ++ goto out_destroy_ept; + + wcn36xx_init_ieee80211(wcn); + ret = ieee80211_register_hw(wcn->hw); +@@ -1361,6 +1361,8 @@ static int wcn36xx_probe(struct platform_device *pdev) + out_unmap: + iounmap(wcn->ccu_base); + iounmap(wcn->dxe_base); ++out_destroy_ept: ++ rpmsg_destroy_ept(wcn->smd_channel); + out_wq: + ieee80211_free_hw(hw); + out_err: +diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c +index b85603e91c7a..3432dfe1ddb4 100644 +--- a/drivers/net/wireless/broadcom/b43/main.c ++++ b/drivers/net/wireless/broadcom/b43/main.c +@@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); +- ++ ieee80211_hw_set(hw, MFP_CAPABLE); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c +index 8b6b657c4b85..5208a39fd6f7 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/main.c ++++ b/drivers/net/wireless/broadcom/b43legacy/main.c +@@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); ++ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c +index e9b23c2e5bd4..efd63f4ce74f 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c ++++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c +@@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); ++ goto drop; + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +index 2c3526aeca6f..545015610cf8 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +@@ -283,13 +283,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) + if (!err) + ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + ++ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); ++ + if (drvr->settings->feature_disable) { + brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", + ifp->drvr->feat_flags, + drvr->settings->feature_disable); + ifp->drvr->feat_flags &= 
~drvr->settings->feature_disable; + } +- brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + + brcmf_feat_firmware_overrides(drvr); + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +index ad18c2f1a806..524f9dd2323d 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +@@ -5,10 +5,9 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -28,10 +27,9 @@ + * + * BSD LICENSE + * +- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -478,6 +476,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, + if (kstrtou16(buf, 0, &amsdu_len)) + return -EINVAL; + ++ /* only change from debug set <-> debug unset */ ++ if ((amsdu_len && mvmsta->orig_amsdu_len) || ++ (!!amsdu_len && mvmsta->orig_amsdu_len)) ++ return -EBUSY; ++ + if (amsdu_len) { + mvmsta->orig_amsdu_len = sta->max_amsdu_len; + sta->max_amsdu_len = amsdu_len; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 6ca087ffd163..ed92a8e8cd51 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -1193,14 +1193,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) + */ + flush_work(&mvm->roc_done_wk); + ++ iwl_mvm_rm_aux_sta(mvm); ++ + iwl_mvm_stop_device(mvm); + + iwl_mvm_async_handlers_purge(mvm); + /* async_handlers_list is empty and will stay empty: HW is stopped */ + +- /* the fw is stopped, the aux sta is dead: clean up driver state */ +- iwl_mvm_del_aux_sta(mvm); +- + /* + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the + * hw (as restart_complete() won't be called in this case) and mac80211 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +index 5b2bd603febf..be8bc0601d7b 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +@@ -367,14 +367,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + u16 size = le32_to_cpu(notif->amsdu_size); + int i; + +- /* +- * In debug sta->max_amsdu_len < size +- * so also check with orig_amsdu_len which holds the original +- * data before debugfs changed the value +- */ +- if (WARN_ON(sta->max_amsdu_len < size && +- mvmsta->orig_amsdu_len < size)) ++ if (sta->max_amsdu_len < size) { ++ /* ++ * In debug sta->max_amsdu_len < size ++ * so also check with orig_amsdu_len which holds the ++ * original data before debugfs changed the value ++ */ ++ WARN_ON(mvmsta->orig_amsdu_len < size); + goto out; ++ } + + mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); + mvmsta->max_amsdu_len = size; +diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index 71d339e90a9e..41f62793a57c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -2080,16 +2080,24 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + return ret; + } + +-void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) ++int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) + { +- iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); +-} ++ int ret; + +-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) +-{ + lockdep_assert_held(&mvm->mutex); + ++ iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); ++ ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); ++ if (ret) ++ IWL_WARN(mvm, "Failed sending remove station\n"); + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); ++ ++ return ret; ++} ++ ++void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) ++{ ++ iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); + } + + /* +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +index 8d70093847cb..da2d1ac01229 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +@@ -8,7 +8,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2018 - 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -31,7 +31,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2018 - 2020 Intel Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without +@@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u8 queue, bool start); + + int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); +-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); ++int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); + + int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c +index 25ac9db35dbf..bedc09215088 100644 +--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c ++++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c +@@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf) + + lbtf_deb_enter(LBTF_DEB_MAIN); + +- if_usb_reset_device(priv); +- +- if (priv) ++ if (priv) { ++ if_usb_reset_device(priv); + lbtf_remove_card(priv); ++ } + + /* Unlink and free urb */ + if_usb_free(cardp); +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index d89684168500..9e6dc289ec3e 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1496,7 +1496,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) + { + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); +- static struct mwifiex_sta_node *node; ++ struct mwifiex_sta_node *node; ++ int i; + + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && + priv->media_connected && idx == 0) { +@@ -1506,13 +1507,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, + HostCmd_ACT_GEN_GET, 0, NULL, true); + +- if (node && (&node->list == &priv->sta_list)) { +- node = NULL; +- return -ENOENT; +- } +- +- node = list_prepare_entry(node, &priv->sta_list, list); +- list_for_each_entry_continue(node, &priv->sta_list, list) { ++ i = 0; ++ list_for_each_entry(node, &priv->sta_list, list) { ++ if (i++ != idx) ++ continue; + ether_addr_copy(mac, node->mac_addr); + return mwifiex_dump_station_info(priv, node, sinfo); + } +diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c +index 8f3d36a15e17..cbff0dfc9631 100644 +--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c ++++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c +@@ -143,8 +143,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) + struct ieee80211_sta *sta; + struct mt76_rx_tid *tid; + bool sn_less; +- u16 seqno, head, size; +- u8 ackp, idx; ++ u16 seqno, head, size, idx; ++ u8 ackp; + + __skb_queue_tail(frames, skb); + +@@ -230,7 +230,7 @@ out: + } + + int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno, +- u16 ssn, u8 size) ++ u16 ssn, u16 size) + { + struct mt76_rx_tid *tid; + +@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start); + + static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) + { +- u8 size = tid->size; ++ u16 size = tid->size; + int i; + + cancel_delayed_work(&tid->reorder_work); +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h +index 502814c26b33..52a16b42dfd7 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h +@@ 
-240,8 +240,8 @@ struct mt76_rx_tid { + struct delayed_work reorder_work; + + u16 head; +- u8 size; +- u8 nframes; ++ u16 size; ++ u16 nframes; + + u8 started:1, stopped:1, timer_pending:1; + +@@ -723,7 +723,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx, + void mt76_set_stream_caps(struct mt76_dev *dev, bool vht); + + int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid, +- u16 ssn, u8 size); ++ u16 ssn, u16 size); + void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); + + void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c +index 348b0072cdd6..c66c6dc00378 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.c ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c +@@ -881,10 +881,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, + + WARN_ON(NULL == skb); + _urb = usb_alloc_urb(0, GFP_ATOMIC); +- if (!_urb) { +- kfree_skb(skb); ++ if (!_urb) + return NULL; +- } + _rtl_install_trx_info(rtlusb, skb, ep_num); + usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, + ep_num), skb->data, skb->len, _rtl_tx_complete, skb); +@@ -898,7 +896,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + u32 ep_num; + struct urb *_urb = NULL; +- struct sk_buff *_skb = NULL; + + WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); + if (unlikely(IS_USB_STOP(rtlusb))) { +@@ -907,8 +904,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + return; + } + ep_num = rtlusb->ep_map.ep_mapping[qnum]; +- _skb = skb; +- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); ++ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); + if (unlikely(!_urb)) { + pr_err("Can't allocate urb. Drop skb!\n"); + kfree_skb(skb); +diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c +index 77a2bdee50fa..4a43c4fa716d 100644 +--- a/drivers/net/wireless/realtek/rtw88/pci.c ++++ b/drivers/net/wireless/realtek/rtw88/pci.c +@@ -974,6 +974,7 @@ static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, + len = pci_resource_len(pdev, bar_id); + rtwpci->mmap = pci_iomap(pdev, bar_id, len); + if (!rtwpci->mmap) { ++ pci_release_regions(pdev); + rtw_err(rtwdev, "failed to map pci memory\n"); + return -ENOMEM; + } +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index f0e0af3aa714..d4b388793f40 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1032,6 +1032,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) + } + EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); + ++/* ++ * In NVMe 1.0 the CNS field was just a binary controller or namespace ++ * flag, thus sending any new CNS opcodes has a big chance of not working. ++ * Qemu unfortunately had that bug after reporting a 1.1 version compliance ++ * (but not for any later version). 
++ */ ++static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) ++{ ++ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) ++ return ctrl->vs < NVME_VS(1, 2, 0); ++ return ctrl->vs < NVME_VS(1, 1, 0); ++} ++ + static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) + { + struct nvme_command c = { }; +@@ -3740,8 +3753,7 @@ static void nvme_scan_work(struct work_struct *work) + + mutex_lock(&ctrl->scan_lock); + nn = le32_to_cpu(id->nn); +- if (ctrl->vs >= NVME_VS(1, 1, 0) && +- !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { ++ if (!nvme_ctrl_limited_cns(ctrl)) { + if (!nvme_scan_ns_list(ctrl, nn)) + goto out_free_id; + } +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index cd64ddb129e5..1c2129493508 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -128,6 +128,9 @@ struct nvme_dev { + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; ++ unsigned int nr_allocated_queues; ++ unsigned int nr_write_queues; ++ unsigned int nr_poll_queues; + }; + + static int io_queue_depth_set(const char *val, const struct kernel_param *kp) +@@ -210,25 +213,14 @@ struct nvme_iod { + struct scatterlist *sg; + }; + +-static unsigned int max_io_queues(void) ++static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) + { +- return num_possible_cpus() + write_queues + poll_queues; +-} +- +-static unsigned int max_queue_count(void) +-{ +- /* IO queues + admin queue */ +- return 1 + max_io_queues(); +-} +- +-static inline unsigned int nvme_dbbuf_size(u32 stride) +-{ +- return (max_queue_count() * 8 * stride); ++ return dev->nr_allocated_queues * 8 * dev->db_stride; + } + + static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) + { +- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); ++ unsigned int mem_size = nvme_dbbuf_size(dev); + + if (dev->dbbuf_dbs) + return 0; +@@ -253,7 +245,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) + + static void nvme_dbbuf_dma_free(struct nvme_dev *dev) + { +- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); ++ unsigned int mem_size = nvme_dbbuf_size(dev); + + if (dev->dbbuf_dbs) { + dma_free_coherent(dev->dev, mem_size, +@@ -2030,7 +2022,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) + static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) + { + struct nvme_dev *dev = affd->priv; +- unsigned int nr_read_queues; ++ unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; + + /* + * If there is no interupt available for queues, ensure that +@@ -2046,12 +2038,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) + if (!nrirqs) { + nrirqs = 1; + nr_read_queues = 0; +- } else if (nrirqs == 1 || !write_queues) { ++ } else if (nrirqs == 1 || !nr_write_queues) { + nr_read_queues = 0; +- } else if (write_queues >= nrirqs) { ++ } else if (nr_write_queues >= nrirqs) { + nr_read_queues = 1; + } else { +- nr_read_queues = nrirqs - write_queues; ++ nr_read_queues = nrirqs - nr_write_queues; + } + + dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; +@@ -2075,7 +2067,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) + * Poll queues don't need interrupts, but we need at least one IO + * queue left over for non-polled IO. 
+ */ +- this_p_queues = poll_queues; ++ this_p_queues = dev->nr_poll_queues; + if (this_p_queues >= nr_io_queues) { + this_p_queues = nr_io_queues - 1; + irq_queues = 1; +@@ -2105,14 +2097,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) + __nvme_disable_io_queues(dev, nvme_admin_delete_cq); + } + ++static unsigned int nvme_max_io_queues(struct nvme_dev *dev) ++{ ++ return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; ++} ++ + static int nvme_setup_io_queues(struct nvme_dev *dev) + { + struct nvme_queue *adminq = &dev->queues[0]; + struct pci_dev *pdev = to_pci_dev(dev->dev); +- int result, nr_io_queues; ++ unsigned int nr_io_queues; + unsigned long size; ++ int result; + +- nr_io_queues = max_io_queues(); ++ /* ++ * Sample the module parameters once at reset time so that we have ++ * stable values to work with. ++ */ ++ dev->nr_write_queues = write_queues; ++ dev->nr_poll_queues = poll_queues; + + /* + * If tags are shared with admin queue (Apple bug), then +@@ -2120,6 +2123,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) + */ + if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) + nr_io_queues = 1; ++ else ++ nr_io_queues = min(nvme_max_io_queues(dev), ++ dev->nr_allocated_queues - 1); + + result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); + if (result < 0) +@@ -2794,8 +2800,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) + if (!dev) + return -ENOMEM; + +- dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), +- GFP_KERNEL, node); ++ dev->nr_write_queues = write_queues; ++ dev->nr_poll_queues = poll_queues; ++ dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; ++ dev->queues = kcalloc_node(dev->nr_allocated_queues, ++ sizeof(struct nvme_queue), GFP_KERNEL, node); + if (!dev->queues) + goto free; + +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index 11e84ed4de36..7900814355c2 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -784,11 +784,11 @@ static void nvme_tcp_data_ready(struct sock *sk) + { + struct nvme_tcp_queue *queue; + +- read_lock(&sk->sk_callback_lock); ++ read_lock_bh(&sk->sk_callback_lock); + queue = sk->sk_user_data; + if (likely(queue && queue->rd_enabled)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); +- read_unlock(&sk->sk_callback_lock); ++ read_unlock_bh(&sk->sk_callback_lock); + } + + static void nvme_tcp_write_space(struct sock *sk) +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c +index a35d3f3996d7..afc1a3d240b5 100644 +--- a/drivers/pci/controller/vmd.c ++++ b/drivers/pci/controller/vmd.c +@@ -854,6 +854,8 @@ static const struct pci_device_id vmd_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), ++ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + {0,} + }; + MODULE_DEVICE_TABLE(pci, vmd_ids); +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index d3033873395d..83d909abc61d 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1777,7 +1777,7 @@ int pci_setup_device(struct pci_dev *dev) + /* Device class may be changed after fixup */ + class = dev->class >> 8; + +- if (dev->non_compliant_bars) { ++ if (dev->non_compliant_bars && !dev->mmio_always_on) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { + pci_info(dev, "device has 
non-compliant BARs; disabling IO/MEM decoding\n"); +@@ -1889,13 +1889,33 @@ static void pci_configure_mps(struct pci_dev *dev) + struct pci_dev *bridge = pci_upstream_bridge(dev); + int mps, mpss, p_mps, rc; + +- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) ++ if (!pci_is_pcie(dev)) + return; + + /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ + if (dev->is_virtfn) + return; + ++ /* ++ * For Root Complex Integrated Endpoints, program the maximum ++ * supported value unless limited by the PCIE_BUS_PEER2PEER case. ++ */ ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { ++ if (pcie_bus_config == PCIE_BUS_PEER2PEER) ++ mps = 128; ++ else ++ mps = 128 << dev->pcie_mpss; ++ rc = pcie_set_mps(dev, mps); ++ if (rc) { ++ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", ++ mps); ++ } ++ return; ++ } ++ ++ if (!bridge || !pci_is_pcie(bridge)) ++ return; ++ + mps = pcie_get_mps(dev); + p_mps = pcie_get_mps(bridge); + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 798e52051ecc..a1ec8a1977d3 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4683,6 +4683,20 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + } + ++static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) ++{ ++ /* ++ * Intel RCiEP's are required to allow p2p only on translated ++ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, ++ * "Root-Complex Peer to Peer Considerations". ++ */ ++ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) ++ return -ENOTTY; ++ ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); ++} ++ + static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) + { + /* +@@ -4765,6 +4779,7 @@ static const struct pci_dev_acs_enabled { + /* I219 */ + { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, ++ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, + /* QCOM QDF2xxx root ports */ + { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, + { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, +@@ -5130,13 +5145,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) + } + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); + +-/* FLR may cause some 82579 devices to hang */ +-static void quirk_intel_no_flr(struct pci_dev *dev) ++/* ++ * FLR may cause the following to devices to hang: ++ * ++ * AMD Starship/Matisse HD Audio Controller 0x1487 ++ * AMD Starship USB 3.0 Host Controller 0x148c ++ * AMD Matisse USB 3.0 Host Controller 0x149c ++ * Intel 82579LM Gigabit Ethernet Controller 0x1502 ++ * Intel 82579V Gigabit Ethernet Controller 0x1503 ++ * ++ */ ++static void quirk_no_flr(struct pci_dev *dev) + { + dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; + } +-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); +-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); + + static void quirk_no_ext_tags(struct pci_dev *pdev) + { +@@ -5551,6 +5578,19 @@ static void 
pci_fixup_no_d0_pme(struct pci_dev *dev) + } + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); + ++/* ++ * Device [12d8:0x400e] and [12d8:0x400f] ++ * These devices advertise PME# support in all power states but don't ++ * reliably assert it. ++ */ ++static void pci_fixup_no_pme(struct pci_dev *dev) ++{ ++ pci_info(dev, "PME# is unreliable, disabling it\n"); ++ dev->pme_support = 0; ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme); ++ + static void apex_pci_fixup_class(struct pci_dev *pdev) + { + pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; +diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c +index 2f8787276d9b..3269232ff570 100644 +--- a/drivers/perf/arm_smmuv3_pmu.c ++++ b/drivers/perf/arm_smmuv3_pmu.c +@@ -815,7 +815,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) + if (err) { + dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", + err, &res_0->start); +- return err; ++ goto out_clear_affinity; + } + + err = perf_pmu_register(&smmu_pmu->pmu, name, -1); +@@ -834,6 +834,8 @@ static int smmu_pmu_probe(struct platform_device *pdev) + + out_unregister: + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); ++out_clear_affinity: ++ irq_set_affinity_hint(smmu_pmu->irq, NULL); + return err; + } + +@@ -843,6 +845,7 @@ static int smmu_pmu_remove(struct platform_device *pdev) + + perf_pmu_unregister(&smmu_pmu->pmu); + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); ++ irq_set_affinity_hint(smmu_pmu->irq, NULL); + + return 0; + } +diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +index f28063873e11..0d6325d6a4ec 100644 +--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c ++++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +@@ -285,7 +285,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), + HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), + HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), +- HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d), ++ HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), + HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), + HISI_PMU_EVENT_ATTR(spill_num, 0x20), +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c +index 0599f5127b01..84501c785473 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c +@@ -40,6 +40,8 @@ struct exynos_irq_chip { + u32 eint_pend; + u32 eint_wake_mask_value; + u32 eint_wake_mask_reg; ++ void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata, ++ struct exynos_irq_chip *irq_chip); + }; + + static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip) +@@ -265,6 +267,7 @@ struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; ++ u32 eint_mask; + }; + + /* +@@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) + return 0; + } + ++static void ++exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, ++ struct exynos_irq_chip *irq_chip) ++{ ++ struct regmap *pmu_regs; ++ ++ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { ++ dev_warn(drvdata->dev, ++ "No retention data configured bank with external wakeup interrupt. 
Wake-up mask will not be set.\n"); ++ return; ++ } ++ ++ pmu_regs = drvdata->retention_ctrl->priv; ++ dev_info(drvdata->dev, ++ "Setting external wakeup interrupt mask: 0x%x\n", ++ irq_chip->eint_wake_mask_value); ++ ++ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, ++ irq_chip->eint_wake_mask_value); ++} ++ ++static void ++s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, ++ struct exynos_irq_chip *irq_chip) ++ ++{ ++ void __iomem *clk_base; ++ ++ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { ++ dev_warn(drvdata->dev, ++ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n"); ++ return; ++ } ++ ++ ++ clk_base = (void __iomem *) drvdata->retention_ctrl->priv; ++ ++ __raw_writel(irq_chip->eint_wake_mask_value, ++ clk_base + irq_chip->eint_wake_mask_reg); ++} ++ + /* + * irq_chip for wakeup interrupts + */ +@@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = { + .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, + .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, + .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, +- /* Only difference with exynos4210_wkup_irq_chip: */ ++ /* Only differences with exynos4210_wkup_irq_chip: */ + .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK, ++ .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask, + }; + + static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { +@@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { + .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, + .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, + .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK, ++ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, + }; + + static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { +@@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { + .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET, + .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, + .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK, ++ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, + }; + + /* list of external wakeup controllers supported */ +@@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) + return 0; + } + +-static void +-exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, +- struct exynos_irq_chip *irq_chip) +-{ +- struct regmap *pmu_regs; +- +- if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { +- dev_warn(drvdata->dev, +- "No retention data configured bank with external wakeup interrupt. 
Wake-up mask will not be set.\n"); +- return; +- } +- +- pmu_regs = drvdata->retention_ctrl->priv; +- dev_info(drvdata->dev, +- "Setting external wakeup interrupt mask: 0x%x\n", +- irq_chip->eint_wake_mask_value); +- +- regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, +- irq_chip->eint_wake_mask_value); +-} +- + static void exynos_pinctrl_suspend_bank( + struct samsung_pinctrl_drv_data *drvdata, + struct samsung_pin_bank *bank) +@@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank( + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ save->eint_mask = readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); ++ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); + } + + void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +@@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) + else if (bank->eint_type == EINT_TYPE_WKUP) { + if (!irq_chip) { + irq_chip = bank->irq_chip; +- exynos_pinctrl_set_eint_wakeup_mask(drvdata, +- irq_chip); ++ irq_chip->set_eint_wakeup_mask(drvdata, ++ irq_chip); + } else if (bank->irq_chip != irq_chip) { + dev_warn(drvdata->dev, + "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n", +@@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank( + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), save->eint_fltcon1); ++ pr_debug("%s: mask %#010x => %#010x\n", bank->name, ++ readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); +@@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank( + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ writel(save->eint_mask, regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + } + + void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 41e28552b2ce..b1f4a31ba1ee 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -111,6 +111,8 @@ struct bios_args { + u32 arg0; + u32 arg1; + u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. 
*/ ++ u32 arg4; ++ u32 arg5; + } __packed; + + /* +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c +index 74e988f839e8..4c1dd1d4e60b 100644 +--- a/drivers/platform/x86/dell-laptop.c ++++ b/drivers/platform/x86/dell-laptop.c +@@ -2204,10 +2204,13 @@ static int __init dell_init(void) + + dell_laptop_register_notifier(&dell_laptop_notifier); + +- micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); +- ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); +- if (ret < 0) +- goto fail_led; ++ if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) && ++ dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) { ++ micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); ++ ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); ++ if (ret < 0) ++ goto fail_led; ++ } + + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) + return 0; +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c +index a881b709af25..a44a2ec33287 100644 +--- a/drivers/platform/x86/hp-wmi.c ++++ b/drivers/platform/x86/hp-wmi.c +@@ -461,8 +461,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, + static ssize_t als_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- u32 tmp = simple_strtoul(buf, NULL, 10); +- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, ++ u32 tmp; ++ int ret; ++ ++ ret = kstrtou32(buf, 10, &tmp); ++ if (ret) ++ return ret; ++ ++ ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, + sizeof(tmp), sizeof(tmp)); + if (ret) + return ret < 0 ? ret : -EINVAL; +diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c +index ef6d4bd77b1a..7a506c1d0113 100644 +--- a/drivers/platform/x86/intel-hid.c ++++ b/drivers/platform/x86/intel-hid.c +@@ -77,6 +77,13 @@ static const struct dmi_system_id button_array_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"), + }, + }, ++ { ++ .ident = "HP Spectre x2 (2015)", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "HP"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c +index b74932307d69..cb2a80fdd8f4 100644 +--- a/drivers/platform/x86/intel-vbtn.c ++++ b/drivers/platform/x86/intel-vbtn.c +@@ -39,28 +39,51 @@ static const struct key_entry intel_vbtn_keymap[] = { + { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */ + { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */ + { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */ ++}; ++ ++static const struct key_entry intel_vbtn_switchmap[] = { + { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ + { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ + { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ + { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ +- { KE_END }, + }; + ++#define KEYMAP_LEN \ ++ (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1) ++ + struct intel_vbtn_priv { ++ struct key_entry keymap[KEYMAP_LEN]; + struct input_dev *input_dev; ++ bool has_switches; + bool wakeup_mode; + }; + + static int intel_vbtn_input_setup(struct platform_device *device) + { + struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); +- int ret; ++ int ret, keymap_len = 0; ++ ++ if (true) { ++ memcpy(&priv->keymap[keymap_len], 
intel_vbtn_keymap, ++ ARRAY_SIZE(intel_vbtn_keymap) * ++ sizeof(struct key_entry)); ++ keymap_len += ARRAY_SIZE(intel_vbtn_keymap); ++ } ++ ++ if (priv->has_switches) { ++ memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap, ++ ARRAY_SIZE(intel_vbtn_switchmap) * ++ sizeof(struct key_entry)); ++ keymap_len += ARRAY_SIZE(intel_vbtn_switchmap); ++ } ++ ++ priv->keymap[keymap_len].type = KE_END; + + priv->input_dev = devm_input_allocate_device(&device->dev); + if (!priv->input_dev) + return -ENOMEM; + +- ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL); ++ ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL); + if (ret) + return ret; + +@@ -115,31 +138,40 @@ out_unknown: + + static void detect_tablet_mode(struct platform_device *device) + { +- const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); + acpi_handle handle = ACPI_HANDLE(&device->dev); +- struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; +- union acpi_object *obj; ++ unsigned long long vgbs; + acpi_status status; + int m; + +- if (!(chassis_type && strcmp(chassis_type, "31") == 0)) +- goto out; +- +- status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); ++ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); + if (ACPI_FAILURE(status)) +- goto out; +- +- obj = vgbs_output.pointer; +- if (!(obj && obj->type == ACPI_TYPE_INTEGER)) +- goto out; ++ return; + +- m = !(obj->integer.value & TABLET_MODE_FLAG); ++ m = !(vgbs & TABLET_MODE_FLAG); + input_report_switch(priv->input_dev, SW_TABLET_MODE, m); +- m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0; ++ m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0; + input_report_switch(priv->input_dev, SW_DOCK, m); +-out: +- kfree(vgbs_output.pointer); ++} ++ ++static bool intel_vbtn_has_switches(acpi_handle handle) ++{ ++ const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); ++ unsigned long long vgbs; ++ acpi_status status; ++ ++ /* ++ * Some normal laptops have a VGBS method despite being non-convertible ++ * and their VGBS method always returns 0, causing detect_tablet_mode() ++ * to report SW_TABLET_MODE=1 to userspace, which causes issues. ++ * These laptops have a DMI chassis_type of 9 ("Laptop"), do not report ++ * switches on any devices with a DMI chassis_type of 9. 
++ */ ++ if (chassis_type && strcmp(chassis_type, "9") == 0) ++ return false; ++ ++ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); ++ return ACPI_SUCCESS(status); + } + + static int intel_vbtn_probe(struct platform_device *device) +@@ -160,13 +192,16 @@ static int intel_vbtn_probe(struct platform_device *device) + return -ENOMEM; + dev_set_drvdata(&device->dev, priv); + ++ priv->has_switches = intel_vbtn_has_switches(handle); ++ + err = intel_vbtn_input_setup(device); + if (err) { + pr_err("Failed to setup Intel Virtual Button\n"); + return err; + } + +- detect_tablet_mode(device); ++ if (priv->has_switches) ++ detect_tablet_mode(device); + + status = acpi_install_notify_handler(handle, + ACPI_DEVICE_NOTIFY, +diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c +index 90cbaa8341e3..0bf9ab8653ae 100644 +--- a/drivers/power/reset/vexpress-poweroff.c ++++ b/drivers/power/reset/vexpress-poweroff.c +@@ -143,6 +143,7 @@ static struct platform_driver vexpress_reset_driver = { + .driver = { + .name = "vexpress-reset", + .of_match_table = vexpress_reset_of_match, ++ .suppress_bind_attrs = true, + }, + }; + +diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c +index 75cf861ba492..2e7e2b73b012 100644 +--- a/drivers/power/supply/power_supply_hwmon.c ++++ b/drivers/power/supply/power_supply_hwmon.c +@@ -144,7 +144,7 @@ static int power_supply_hwmon_read_string(struct device *dev, + u32 attr, int channel, + const char **str) + { +- *str = channel ? "temp" : "temp ambient"; ++ *str = channel ? "temp ambient" : "temp"; + return 0; + } + +@@ -304,7 +304,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) + goto error; + } + +- ret = devm_add_action(dev, power_supply_hwmon_bitmap_free, ++ ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free, + psyhw->props); + if (ret) + goto error; +diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c +index 0246b6f99fb5..f11e4bfbc91b 100644 +--- a/drivers/regulator/qcom-rpmh-regulator.c ++++ b/drivers/regulator/qcom-rpmh-regulator.c +@@ -832,11 +832,11 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = { + RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"), + RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"), + RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), +- RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"), ++ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"), + RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), + RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), +- RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"), +- RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"), ++ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"), ++ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"), + RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"), + {}, + }; +@@ -857,7 +857,7 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = { + RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"), +- RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"), ++ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8"), + RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"), + RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"), 
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"), +diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig +index c8ef05d6b8c7..25df4406ce52 100644 +--- a/drivers/soc/tegra/Kconfig ++++ b/drivers/soc/tegra/Kconfig +@@ -130,6 +130,7 @@ config SOC_TEGRA_FLOWCTRL + + config SOC_TEGRA_PMC + bool ++ select GENERIC_PINCONF + + config SOC_TEGRA_POWERGATE_BPMP + def_bool y +diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c +index 2663bb12d9ce..b07710c76fc9 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -147,6 +147,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, + if (!xfer->tx_buf) + return NULL; + ++ memset(&txconf, 0, sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = 16; +@@ -193,6 +194,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + if (!xfer->rx_buf) + return NULL; + ++ memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = 16; +@@ -218,19 +220,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + + static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) + { +- u16 dma_ctrl = 0; ++ u16 imr = 0, dma_ctrl = 0; + + dw_writel(dws, DW_SPI_DMARDLR, 0xf); + dw_writel(dws, DW_SPI_DMATDLR, 0x10); + +- if (xfer->tx_buf) ++ if (xfer->tx_buf) { + dma_ctrl |= SPI_DMA_TDMAE; +- if (xfer->rx_buf) ++ imr |= SPI_INT_TXOI; ++ } ++ if (xfer->rx_buf) { + dma_ctrl |= SPI_DMA_RDMAE; ++ imr |= SPI_INT_RXUI | SPI_INT_RXOI; ++ } + dw_writel(dws, DW_SPI_DMACR, dma_ctrl); + + /* Set the interrupt mask */ +- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); ++ spi_umask_intr(dws, imr); + + dws->transfer_handler = dma_transfer; + +@@ -260,7 +266,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) + dma_async_issue_pending(dws->txchan); + } + +- return 0; ++ return 1; + } + + static void mid_spi_dma_stop(struct dw_spi *dws) +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index 82c5c027ec4c..c2f96941ad04 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -381,11 +381,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, + + spi_enable_chip(dws, 1); + +- if (dws->dma_mapped) { +- ret = dws->dma_ops->dma_transfer(dws, transfer); +- if (ret < 0) +- return ret; +- } ++ if (dws->dma_mapped) ++ return dws->dma_ops->dma_transfer(dws, transfer); + + if (chip->poll_mode) + return poll_transfer(dws); +@@ -529,6 +526,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + dws->dma_inited = 0; + } else { + master->can_dma = dws->dma_ops->can_dma; ++ master->flags |= SPI_CONTROLLER_MUST_TX; + } + } + +diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c +index 9f0fa9f3116d..de0ba3e5449f 100644 +--- a/drivers/spi/spi-mem.c ++++ b/drivers/spi/spi-mem.c +@@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) + return 0; + + case 2: +- if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) || +- (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD)))) ++ if ((tx && ++ (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || ++ (!tx && ++ (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) + return 0; + + break; + + case 4: +- if ((tx && (mode & SPI_TX_QUAD)) || +- (!tx && (mode & SPI_RX_QUAD))) ++ if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || ++ (!tx && (mode & 
(SPI_RX_QUAD | SPI_RX_OCTAL)))) + return 0; + + break; +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index d0d6f1bda1b6..7f4285e2ae68 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -148,6 +148,7 @@ static const struct lpss_config lpss_platforms[] = { + .tx_threshold_hi = 48, + .cs_sel_shift = 8, + .cs_sel_mask = 3 << 8, ++ .cs_clk_stays_gated = true, + }, + { /* LPSS_CNL_SSP */ + .offset = 0x200, +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 6bfbf0cfcf63..c6242f0a307f 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -1950,6 +1950,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) + } + + lookup->max_speed_hz = sb->connection_speed; ++ lookup->bits_per_word = sb->data_bit_length; + + if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) + lookup->mode |= SPI_CPHA; +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c +index 473b465724f1..0755b11348ed 100644 +--- a/drivers/staging/android/ion/ion_heap.c ++++ b/drivers/staging/android/ion/ion_heap.c +@@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + + static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) + { +- void *addr = vm_map_ram(pages, num, -1, pgprot); ++ void *addr = vmap(pages, num, VM_MAP, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); +- vm_unmap_ram(addr, num); ++ vunmap(addr); + + return 0; + } +diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c +index 68c5718be827..c4b16bb5c1a4 100644 +--- a/drivers/staging/greybus/sdio.c ++++ b/drivers/staging/greybus/sdio.c +@@ -411,6 +411,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + struct gb_sdio_command_request request = {0}; + struct gb_sdio_command_response response; + struct mmc_data *data = host->mrq->data; ++ unsigned int timeout_ms; + u8 cmd_flags; + u8 cmd_type; + int i; +@@ -469,9 +470,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + request.data_blksz = cpu_to_le16(data->blksz); + } + +- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, +- &request, sizeof(request), &response, +- sizeof(response)); ++ timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : ++ GB_OPERATION_TIMEOUT_DEFAULT; ++ ++ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, ++ &request, sizeof(request), &response, ++ sizeof(response), timeout_ms); + if (ret < 0) + goto out; + +diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c +index 6f628195c4da..021bbd420390 100644 +--- a/drivers/staging/media/imx/imx7-mipi-csis.c ++++ b/drivers/staging/media/imx/imx7-mipi-csis.c +@@ -657,28 +657,6 @@ static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd, + return 0; + } + +-static struct csis_pix_format const * +-mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf) +-{ +- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); +- struct csis_pix_format const *csis_fmt; +- +- csis_fmt = find_csis_format(mf->code); +- if (!csis_fmt) +- csis_fmt = &mipi_csis_formats[0]; +- +- v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH, +- csis_fmt->pix_width_alignment, +- &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1, +- 0); +- +- state->format_mbus.code = csis_fmt->code; +- state->format_mbus.width = mf->width; +- state->format_mbus.height = mf->height; +- +- return csis_fmt; +-} +- + static struct v4l2_mbus_framefmt * + mipi_csis_get_format(struct csi_state *state, + struct v4l2_subdev_pad_config *cfg, +@@ -691,53 +669,67 @@ mipi_csis_get_format(struct csi_state *state, + return &state->format_mbus; + } + +-static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, ++static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) + { + struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); +- struct csis_pix_format const *csis_fmt; + struct v4l2_mbus_framefmt *fmt; + +- if (sdformat->pad >= CSIS_PADS_NUM) +- return -EINVAL; +- +- fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); +- + mutex_lock(&state->lock); +- if (sdformat->pad == CSIS_PAD_SOURCE) { +- sdformat->format = *fmt; +- goto unlock; +- } +- +- csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format); +- ++ fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + sdformat->format = *fmt; +- +- if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) +- state->csis_fmt = csis_fmt; +- else +- cfg->try_fmt = sdformat->format; +- +-unlock: + mutex_unlock(&state->lock); + + return 0; + } + +-static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, ++static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) + { + struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); ++ struct csis_pix_format const *csis_fmt; + struct v4l2_mbus_framefmt *fmt; + +- mutex_lock(&state->lock); ++ /* ++ * The CSIS can't transcode in any way, the source format can't be ++ * modified. ++ */ ++ if (sdformat->pad == CSIS_PAD_SOURCE) ++ return mipi_csis_get_fmt(mipi_sd, cfg, sdformat); ++ ++ if (sdformat->pad != CSIS_PAD_SINK) ++ return -EINVAL; + + fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + ++ mutex_lock(&state->lock); ++ ++ /* Validate the media bus code and clamp the size. 
*/ ++ csis_fmt = find_csis_format(sdformat->format.code); ++ if (!csis_fmt) ++ csis_fmt = &mipi_csis_formats[0]; ++ ++ fmt->code = csis_fmt->code; ++ fmt->width = sdformat->format.width; ++ fmt->height = sdformat->format.height; ++ ++ v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH, ++ csis_fmt->pix_width_alignment, ++ &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 1, 0); ++ + sdformat->format = *fmt; + ++ /* Propagate the format from sink to source. */ ++ fmt = mipi_csis_get_format(state, cfg, sdformat->which, ++ CSIS_PAD_SOURCE); ++ *fmt = sdformat->format; ++ ++ /* Store the CSIS format descriptor for active formats. */ ++ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) ++ state->csis_fmt = csis_fmt; ++ + mutex_unlock(&state->lock); + + return 0; +diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c +index 3d969b0522ab..abcf1f3e5f63 100644 +--- a/drivers/staging/media/ipu3/ipu3-mmu.c ++++ b/drivers/staging/media/ipu3/ipu3-mmu.c +@@ -174,8 +174,10 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) + spin_lock_irqsave(&mmu->lock, flags); + + l2pt = mmu->l2pts[l1pt_idx]; +- if (l2pt) +- goto done; ++ if (l2pt) { ++ spin_unlock_irqrestore(&mmu->lock, flags); ++ return l2pt; ++ } + + spin_unlock_irqrestore(&mmu->lock, flags); + +@@ -190,8 +192,9 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) + + l2pt = mmu->l2pts[l1pt_idx]; + if (l2pt) { ++ spin_unlock_irqrestore(&mmu->lock, flags); + imgu_mmu_free_page_table(new_l2pt); +- goto done; ++ return l2pt; + } + + l2pt = new_l2pt; +@@ -200,7 +203,6 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) + pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt)); + mmu->l1pt[l1pt_idx] = pteval; + +-done: + spin_unlock_irqrestore(&mmu->lock, flags); + return l2pt; + } +diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c +index 3c7ad1eed434..c764cb55dc8d 100644 +--- a/drivers/staging/media/ipu3/ipu3-v4l2.c ++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c +@@ -367,8 +367,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb) + + vb2_set_plane_payload(vb, 0, need_bytes); + ++ mutex_lock(&imgu->streaming_lock); + if (imgu->streaming) + imgu_queue_buffers(imgu, false, node->pipe); ++ mutex_unlock(&imgu->streaming_lock); + + dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__, + node->pipe, node->id); +@@ -468,10 +470,13 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) + dev_dbg(dev, "%s node name %s pipe %u id %u", __func__, + node->name, node->pipe, node->id); + ++ mutex_lock(&imgu->streaming_lock); + if (imgu->streaming) { + r = -EBUSY; ++ mutex_unlock(&imgu->streaming_lock); + goto fail_return_bufs; + } ++ mutex_unlock(&imgu->streaming_lock); + + if (!node->enabled) { + dev_err(dev, "IMGU node is not enabled"); +@@ -498,9 +503,11 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) + + /* Start streaming of the whole pipeline now */ + dev_dbg(dev, "IMGU streaming is ready to start"); ++ mutex_lock(&imgu->streaming_lock); + r = imgu_s_stream(imgu, true); + if (!r) + imgu->streaming = true; ++ mutex_unlock(&imgu->streaming_lock); + + return 0; + +@@ -532,6 +539,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) + dev_err(&imgu->pci_dev->dev, + "failed to stop subdev streaming\n"); + ++ mutex_lock(&imgu->streaming_lock); + /* Was this the first node with streaming disabled? 
*/ + if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) { + /* Yes, really stop streaming now */ +@@ -542,6 +550,8 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) + } + + imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); ++ mutex_unlock(&imgu->streaming_lock); ++ + media_pipeline_stop(&node->vdev.entity); + } + +diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c +index 06a61f31ca50..08eb6791918b 100644 +--- a/drivers/staging/media/ipu3/ipu3.c ++++ b/drivers/staging/media/ipu3/ipu3.c +@@ -261,6 +261,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe + + ivb = list_first_entry(&imgu_pipe->nodes[node].buffers, + struct imgu_vb2_buffer, list); ++ list_del(&ivb->list); + vb = &ivb->vbb.vb2_buf; + r = imgu_css_set_parameters(&imgu->css, pipe, + vb2_plane_vaddr(vb, 0)); +@@ -274,7 +275,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + dev_dbg(&imgu->pci_dev->dev, + "queue user parameters %d to css.", vb->index); +- list_del(&ivb->list); + } else if (imgu_pipe->queue_enabled[node]) { + struct imgu_css_buffer *buf = + imgu_queue_getbuf(imgu, node, pipe); +@@ -663,6 +663,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev, + return r; + + mutex_init(&imgu->lock); ++ mutex_init(&imgu->streaming_lock); + atomic_set(&imgu->qbuf_barrier, 0); + init_waitqueue_head(&imgu->buf_drain_wq); + +@@ -726,6 +727,7 @@ out_mmu_exit: + out_css_powerdown: + imgu_css_set_powerdown(&pci_dev->dev, imgu->base); + out_mutex_destroy: ++ mutex_destroy(&imgu->streaming_lock); + mutex_destroy(&imgu->lock); + + return r; +@@ -743,6 +745,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev) + imgu_css_set_powerdown(&pci_dev->dev, imgu->base); + imgu_dmamap_exit(imgu); + imgu_mmu_exit(imgu->mmu); ++ mutex_destroy(&imgu->streaming_lock); + mutex_destroy(&imgu->lock); + } + +diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h +index 73b123b2b8a2..8cd6a0077d99 100644 +--- a/drivers/staging/media/ipu3/ipu3.h ++++ b/drivers/staging/media/ipu3/ipu3.h +@@ -146,6 +146,10 @@ struct imgu_device { + * vid_buf.list and css->queue + */ + struct mutex lock; ++ ++ /* Lock to protect writes to streaming flag in this struct */ ++ struct mutex streaming_lock; ++ + /* Forbid streaming and buffer queuing during system suspend. */ + atomic_t qbuf_barrier; + /* Indicate if system suspend take place while imgu is streaming. */ +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c +index 56ca4c9ad01c..47940f02457b 100644 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c +@@ -65,6 +65,8 @@ void cedrus_device_run(void *priv) + + v4l2_m2m_buf_copy_metadata(run.src, run.dst, true); + ++ cedrus_dst_format_set(dev, &ctx->dst_fmt); ++ + dev->dec_ops[ctx->current_codec]->setup(ctx, &run); + + /* Complete request(s) controls if needed. 
*/ +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c +index eeee3efd247b..966f9f3ed9d3 100644 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c +@@ -286,7 +286,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) + { + struct cedrus_ctx *ctx = cedrus_file2ctx(file); +- struct cedrus_dev *dev = ctx->dev; + struct vb2_queue *vq; + int ret; + +@@ -300,8 +299,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, + + ctx->dst_fmt = f->fmt.pix; + +- cedrus_dst_format_set(dev, &ctx->dst_fmt); +- + return 0; + } + +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index 28bdbd7b4ab2..f1d230c5a8ef 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -1026,7 +1026,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up) + gpios = mctrl_gpio_init(&uart->port, 0); + if (IS_ERR(gpios)) { + ret = PTR_ERR(gpios); +- goto out_unlock; ++ goto err; + } else { + uart->gpios = gpios; + } +@@ -1075,8 +1075,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) + serial8250_apply_quirks(uart); + ret = uart_add_one_port(&serial8250_reg, + &uart->port); +- if (ret == 0) +- ret = uart->port.line; ++ if (ret) ++ goto err; ++ ++ ret = uart->port.line; + } else { + dev_info(uart->port.dev, + "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", +@@ -1098,10 +1100,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up) + } + } + +-out_unlock: + mutex_unlock(&serial_mutex); + + return ret; ++ ++err: ++ uart->port.dev = NULL; ++ mutex_unlock(&serial_mutex); ++ return ret; + } + EXPORT_SYMBOL(serial8250_register_8250_port); + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 8a01d034f9d1..7cad66eb39ff 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -1871,12 +1871,6 @@ pci_moxa_setup(struct serial_private *priv, + #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 + #define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 + +-#define PCI_VENDOR_ID_PERICOM 0x12D8 +-#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +-#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +-#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +-#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 +- + #define PCI_VENDOR_ID_ACCESIO 0x494f + #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 + #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c +index c7d51b51898f..f5608ad68ae1 100644 +--- a/drivers/tty/serial/kgdboc.c ++++ b/drivers/tty/serial/kgdboc.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #define MAX_CONFIG_LEN 40 + +@@ -27,6 +28,7 @@ static struct kgdb_io kgdboc_io_ops; + + /* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
*/ + static int configured = -1; ++static DEFINE_MUTEX(config_mutex); + + static char config[MAX_CONFIG_LEN]; + static struct kparam_string kps = { +@@ -38,6 +40,8 @@ static int kgdboc_use_kms; /* 1 if we use kernel mode switching */ + static struct tty_driver *kgdb_tty_driver; + static int kgdb_tty_line; + ++static struct platform_device *kgdboc_pdev; ++ + #ifdef CONFIG_KDB_KEYBOARD + static int kgdboc_reset_connect(struct input_handler *handler, + struct input_dev *dev, +@@ -133,11 +137,13 @@ static void kgdboc_unregister_kbd(void) + + static void cleanup_kgdboc(void) + { ++ if (configured != 1) ++ return; ++ + if (kgdb_unregister_nmi_console()) + return; + kgdboc_unregister_kbd(); +- if (configured == 1) +- kgdb_unregister_io_module(&kgdboc_io_ops); ++ kgdb_unregister_io_module(&kgdboc_io_ops); + } + + static int configure_kgdboc(void) +@@ -200,20 +206,79 @@ nmi_con_failed: + kgdb_unregister_io_module(&kgdboc_io_ops); + noconfig: + kgdboc_unregister_kbd(); +- config[0] = 0; + configured = 0; +- cleanup_kgdboc(); + + return err; + } + ++static int kgdboc_probe(struct platform_device *pdev) ++{ ++ int ret = 0; ++ ++ mutex_lock(&config_mutex); ++ if (configured != 1) { ++ ret = configure_kgdboc(); ++ ++ /* Convert "no device" to "defer" so we'll keep trying */ ++ if (ret == -ENODEV) ++ ret = -EPROBE_DEFER; ++ } ++ mutex_unlock(&config_mutex); ++ ++ return ret; ++} ++ ++static struct platform_driver kgdboc_platform_driver = { ++ .probe = kgdboc_probe, ++ .driver = { ++ .name = "kgdboc", ++ .suppress_bind_attrs = true, ++ }, ++}; ++ + static int __init init_kgdboc(void) + { +- /* Already configured? */ +- if (configured == 1) ++ int ret; ++ ++ /* ++ * kgdboc is a little bit of an odd "platform_driver". It can be ++ * up and running long before the platform_driver object is ++ * created and thus doesn't actually store anything in it. There's ++ * only one instance of kgdb so anything is stored as global state. ++ * The platform_driver is only created so that we can leverage the ++ * kernel's mechanisms (like -EPROBE_DEFER) to call us when our ++ * underlying tty is ready. Here we init our platform driver and ++ * then create the single kgdboc instance. 
++ */ ++ ret = platform_driver_register(&kgdboc_platform_driver); ++ if (ret) ++ return ret; ++ ++ kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE); ++ if (!kgdboc_pdev) { ++ ret = -ENOMEM; ++ goto err_did_register; ++ } ++ ++ ret = platform_device_add(kgdboc_pdev); ++ if (!ret) + return 0; + +- return configure_kgdboc(); ++ platform_device_put(kgdboc_pdev); ++ ++err_did_register: ++ platform_driver_unregister(&kgdboc_platform_driver); ++ return ret; ++} ++ ++static void exit_kgdboc(void) ++{ ++ mutex_lock(&config_mutex); ++ cleanup_kgdboc(); ++ mutex_unlock(&config_mutex); ++ ++ platform_device_unregister(kgdboc_pdev); ++ platform_driver_unregister(&kgdboc_platform_driver); + } + + static int kgdboc_get_char(void) +@@ -236,24 +301,20 @@ static int param_set_kgdboc_var(const char *kmessage, + const struct kernel_param *kp) + { + size_t len = strlen(kmessage); ++ int ret = 0; + + if (len >= MAX_CONFIG_LEN) { + pr_err("config string too long\n"); + return -ENOSPC; + } + +- /* Only copy in the string if the init function has not run yet */ +- if (configured < 0) { +- strcpy(config, kmessage); +- return 0; +- } +- + if (kgdb_connected) { + pr_err("Cannot reconfigure while KGDB is connected.\n"); +- + return -EBUSY; + } + ++ mutex_lock(&config_mutex); ++ + strcpy(config, kmessage); + /* Chop out \n char as a result of echo */ + if (len && config[len - 1] == '\n') +@@ -262,8 +323,30 @@ static int param_set_kgdboc_var(const char *kmessage, + if (configured == 1) + cleanup_kgdboc(); + +- /* Go and configure with the new params. */ +- return configure_kgdboc(); ++ /* ++ * Configure with the new params as long as init already ran. ++ * Note that we can get called before init if someone loads us ++ * with "modprobe kgdboc kgdboc=..." or if they happen to use the ++ * the odd syntax of "kgdboc.kgdboc=..." on the kernel command. ++ */ ++ if (configured >= 0) ++ ret = configure_kgdboc(); ++ ++ /* ++ * If we couldn't configure then clear out the config. Note that ++ * specifying an invalid config on the kernel command line vs. ++ * through sysfs have slightly different behaviors. If we fail ++ * to configure what was specified on the kernel command line ++ * we'll leave it in the 'config' and return -EPROBE_DEFER from ++ * our probe. When specified through sysfs userspace is ++ * responsible for loading the tty driver before setting up. ++ */ ++ if (ret) ++ config[0] = '\0'; ++ ++ mutex_unlock(&config_mutex); ++ ++ return ret; + } + + static int dbg_restore_graphics; +@@ -326,15 +409,8 @@ __setup("kgdboc=", kgdboc_option_setup); + /* This is only available if kgdboc is a built in for early debugging */ + static int __init kgdboc_early_init(char *opt) + { +- /* save the first character of the config string because the +- * init routine can destroy it. 
+- */ +- char save_ch; +- + kgdboc_option_setup(opt); +- save_ch = config[0]; +- init_kgdboc(); +- config[0] = save_ch; ++ configure_kgdboc(); + return 0; + } + +@@ -342,7 +418,7 @@ early_param("ekgdboc", kgdboc_early_init); + #endif /* CONFIG_KGDB_SERIAL_CONSOLE */ + + module_init(init_kgdboc); +-module_exit(cleanup_kgdboc); ++module_exit(exit_kgdboc); + module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644); + MODULE_PARM_DESC(kgdboc, "[,baud]"); + MODULE_DESCRIPTION("KGDB Console TTY Driver"); +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c +index 4164045866b3..6bac5c18cf6d 100644 +--- a/drivers/w1/masters/omap_hdq.c ++++ b/drivers/w1/masters/omap_hdq.c +@@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + /* check irqstatus */ + if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { + dev_dbg(hdq_data->dev, "timeout waiting for" +- " TXCOMPLETE/RXCOMPLETE, %x", *status); ++ " TXCOMPLETE/RXCOMPLETE, %x\n", *status); + ret = -ETIMEDOUT; + goto out; + } +@@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + OMAP_HDQ_FLAG_CLEAR, &tmp_status); + if (ret) { + dev_dbg(hdq_data->dev, "timeout waiting GO bit" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + } + + out: +@@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) + spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); + hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); + spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); +- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); ++ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); + + if (hdq_data->hdq_irqstatus & + (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE +@@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + tmp_status = hdq_data->hdq_irqstatus; + /* check irqstatus */ + if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { +- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", ++ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", + tmp_status); + ret = -ETIMEDOUT; + goto out; +@@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + &tmp_status); + if (ret) + dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + + out: + mutex_unlock(&hdq_data->hdq_mutex); +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 2fead6c3c687..c2dd94e1b274 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -1167,7 +1167,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( + free_extent_map(em); + + return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, +- num_items, 1); ++ num_items); + } + + /* +diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c +index d07bd41a7c1e..343400d49bd1 100644 +--- a/fs/btrfs/block-rsv.c ++++ b/fs/btrfs/block-rsv.c +@@ -5,6 +5,7 @@ + #include "block-rsv.h" + #include "space-info.h" + #include "transaction.h" ++#include "block-group.h" + + static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *block_rsv, +@@ -313,6 +314,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) + else + block_rsv->full = 0; + ++ if (block_rsv->size >= sinfo->total_bytes) ++ sinfo->force_alloc = CHUNK_ALLOC_FORCE; + spin_unlock(&block_rsv->lock); + spin_unlock(&sinfo->lock); + } +diff --git 
a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 169075550a5a..6d2c277c6e0a 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -2465,6 +2465,7 @@ enum btrfs_reserve_flush_enum { + BTRFS_RESERVE_FLUSH_LIMIT, + BTRFS_RESERVE_FLUSH_EVICT, + BTRFS_RESERVE_FLUSH_ALL, ++ BTRFS_RESERVE_FLUSH_ALL_STEAL, + }; + + enum btrfs_flush_state { +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index f62a179f85bb..2b8f29c07668 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -798,10 +798,12 @@ again: + nritems = btrfs_header_nritems(path->nodes[0]); + if (!nritems || (path->slots[0] >= nritems - 1)) { + ret = btrfs_next_leaf(root, path); +- if (ret == 1) ++ if (ret < 0) { ++ goto out; ++ } else if (ret > 0) { + found_next = 1; +- if (ret != 0) + goto insert; ++ } + slot = path->slots[0]; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 94b0df3fb3c8..127cdecbe872 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -49,6 +49,7 @@ + #include "qgroup.h" + #include "delalloc-space.h" + #include "block-group.h" ++#include "space-info.h" + + struct btrfs_iget_args { + struct btrfs_key *location; +@@ -1132,7 +1133,7 @@ out_unlock: + */ + if (extent_reserved) { + extent_clear_unlock_delalloc(inode, start, +- start + cur_alloc_size, ++ start + cur_alloc_size - 1, + locked_page, + clear_bits, + page_ops); +@@ -1322,6 +1323,66 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, + return 1; + } + ++static int fallback_to_cow(struct inode *inode, struct page *locked_page, ++ const u64 start, const u64 end, ++ int *page_started, unsigned long *nr_written) ++{ ++ const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); ++ const u64 range_bytes = end + 1 - start; ++ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; ++ u64 range_start = start; ++ u64 count; ++ ++ /* ++ * If EXTENT_NORESERVE is set it means that when the buffered write was ++ * made we had not enough available data space and therefore we did not ++ * reserve data space for it, since we though we could do NOCOW for the ++ * respective file range (either there is prealloc extent or the inode ++ * has the NOCOW bit set). ++ * ++ * However when we need to fallback to COW mode (because for example the ++ * block group for the corresponding extent was turned to RO mode by a ++ * scrub or relocation) we need to do the following: ++ * ++ * 1) We increment the bytes_may_use counter of the data space info. ++ * If COW succeeds, it allocates a new data extent and after doing ++ * that it decrements the space info's bytes_may_use counter and ++ * increments its bytes_reserved counter by the same amount (we do ++ * this at btrfs_add_reserved_bytes()). So we need to increment the ++ * bytes_may_use counter to compensate (when space is reserved at ++ * buffered write time, the bytes_may_use counter is incremented); ++ * ++ * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so ++ * that if the COW path fails for any reason, it decrements (through ++ * extent_clear_unlock_delalloc()) the bytes_may_use counter of the ++ * data space info, which we incremented in the step above. ++ * ++ * If we need to fallback to cow and the inode corresponds to a free ++ * space cache inode, we must also increment bytes_may_use of the data ++ * space_info for the same reason. Space caches always get a prealloc ++ * extent for them, however scrub or balance may have set the block ++ * group that contains that extent to RO mode. 
++ */ ++ count = count_range_bits(io_tree, &range_start, end, range_bytes, ++ EXTENT_NORESERVE, 0); ++ if (count > 0 || is_space_ino) { ++ const u64 bytes = is_space_ino ? range_bytes : count; ++ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; ++ struct btrfs_space_info *sinfo = fs_info->data_sinfo; ++ ++ spin_lock(&sinfo->lock); ++ btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); ++ spin_unlock(&sinfo->lock); ++ ++ if (count > 0) ++ clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, ++ 0, 0, NULL); ++ } ++ ++ return cow_file_range(inode, locked_page, start, end, page_started, ++ nr_written, 1); ++} ++ + /* + * when nowcow writeback call back. This checks for snapshots or COW copies + * of the extents that exist in the file, and COWs the file as required. +@@ -1569,9 +1630,9 @@ out_check: + * NOCOW, following one which needs to be COW'ed + */ + if (cow_start != (u64)-1) { +- ret = cow_file_range(inode, locked_page, +- cow_start, found_key.offset - 1, +- page_started, nr_written, 1); ++ ret = fallback_to_cow(inode, locked_page, cow_start, ++ found_key.offset - 1, ++ page_started, nr_written); + if (ret) { + if (nocow) + btrfs_dec_nocow_writers(fs_info, +@@ -1660,8 +1721,8 @@ out_check: + + if (cow_start != (u64)-1) { + cur_offset = end; +- ret = cow_file_range(inode, locked_page, cow_start, end, +- page_started, nr_written, 1); ++ ret = fallback_to_cow(inode, locked_page, cow_start, end, ++ page_started, nr_written); + if (ret) + goto error; + } +@@ -4250,7 +4311,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) + * 1 for the inode ref + * 1 for the inode + */ +- return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); ++ return btrfs_start_transaction_fallback_global_rsv(root, 5); + } + + static int btrfs_unlink(struct inode *dir, struct dentry *dentry) +@@ -8534,7 +8595,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) + + /* bio split */ + ASSERT(geom.len <= INT_MAX); +- atomic_inc(&dip->pending_bios); + do { + clone_len = min_t(int, submit_len, geom.len); + +@@ -8584,7 +8644,8 @@ submit: + if (!status) + return 0; + +- bio_put(bio); ++ if (bio != orig_bio) ++ bio_put(bio); + out_err: + dip->errors = 1; + /* +@@ -8625,7 +8686,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + bio->bi_private = dip; + dip->orig_bio = bio; + dip->dio_bio = dio_bio; +- atomic_set(&dip->pending_bios, 0); ++ atomic_set(&dip->pending_bios, 1); + io_bio = btrfs_io_bio(bio); + io_bio->logical = file_offset; + +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 590defdf8860..b94f6f99e90d 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -2636,6 +2636,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + struct btrfs_root *quota_root; + struct btrfs_qgroup *srcgroup; + struct btrfs_qgroup *dstgroup; ++ bool need_rescan = false; + u32 level_size = 0; + u64 nums; + +@@ -2779,6 +2780,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + goto unlock; + } + ++i_qgroups; ++ ++ /* ++ * If we're doing a snapshot, and adding the snapshot to a new ++ * qgroup, the numbers are guaranteed to be incorrect. 
++ */ ++ if (srcid) ++ need_rescan = true; + } + + for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { +@@ -2798,6 +2806,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + + dst->rfer = src->rfer - level_size; + dst->rfer_cmpr = src->rfer_cmpr - level_size; ++ ++ /* Manually tweaking numbers certainly needs a rescan */ ++ need_rescan = true; + } + for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { + struct btrfs_qgroup *src; +@@ -2816,6 +2827,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + + dst->excl = src->excl + level_size; + dst->excl_cmpr = src->excl_cmpr + level_size; ++ need_rescan = true; + } + + unlock: +@@ -2823,6 +2835,8 @@ unlock: + out: + if (!committing) + mutex_unlock(&fs_info->qgroup_ioctl_lock); ++ if (need_rescan) ++ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + return ret; + } + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 3eb0fec2488a..6ad216e8178e 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -23,6 +23,7 @@ + #include "btrfs_inode.h" + #include "transaction.h" + #include "compression.h" ++#include "xattr.h" + + /* + * Maximum number of references an extent can have in order for us to attempt to +@@ -4536,6 +4537,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + struct fs_path *p; + struct posix_acl_xattr_header dummy_acl; + ++ /* Capabilities are emitted by finish_inode_if_needed */ ++ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) ++ return 0; ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +@@ -5098,6 +5103,64 @@ static int send_extent_data(struct send_ctx *sctx, + return 0; + } + ++/* ++ * Search for a capability xattr related to sctx->cur_ino. If the capability is ++ * found, call send_set_xattr function to emit it. ++ * ++ * Return 0 if there isn't a capability, or when the capability was emitted ++ * successfully, or < 0 if an error occurred. 
++ */ ++static int send_capabilities(struct send_ctx *sctx) ++{ ++ struct fs_path *fspath = NULL; ++ struct btrfs_path *path; ++ struct btrfs_dir_item *di; ++ struct extent_buffer *leaf; ++ unsigned long data_ptr; ++ char *buf = NULL; ++ int buf_len; ++ int ret = 0; ++ ++ path = alloc_path_for_send(); ++ if (!path) ++ return -ENOMEM; ++ ++ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, ++ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); ++ if (!di) { ++ /* There is no xattr for this inode */ ++ goto out; ++ } else if (IS_ERR(di)) { ++ ret = PTR_ERR(di); ++ goto out; ++ } ++ ++ leaf = path->nodes[0]; ++ buf_len = btrfs_dir_data_len(leaf, di); ++ ++ fspath = fs_path_alloc(); ++ buf = kmalloc(buf_len, GFP_KERNEL); ++ if (!fspath || !buf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); ++ if (ret < 0) ++ goto out; ++ ++ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); ++ read_extent_buffer(leaf, buf, data_ptr, buf_len); ++ ++ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, ++ strlen(XATTR_NAME_CAPS), buf, buf_len); ++out: ++ kfree(buf); ++ fs_path_free(fspath); ++ btrfs_free_path(path); ++ return ret; ++} ++ + static int clone_range(struct send_ctx *sctx, + struct clone_root *clone_root, + const u64 disk_byte, +@@ -6001,6 +6064,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) + goto out; + } + ++ ret = send_capabilities(sctx); ++ if (ret < 0) ++ goto out; ++ + /* + * If other directory inodes depended on our current directory + * inode's move/rename, now do their move/rename operations. +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c +index e8a4b0ebe97f..7889a59a57fa 100644 +--- a/fs/btrfs/space-info.c ++++ b/fs/btrfs/space-info.c +@@ -462,6 +462,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, + struct reserve_ticket *ticket = NULL; + struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; + struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; ++ struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; + struct btrfs_trans_handle *trans; + u64 bytes_needed; + u64 reclaim_bytes = 0; +@@ -524,6 +525,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, + spin_lock(&delayed_refs_rsv->lock); + reclaim_bytes += delayed_refs_rsv->reserved; + spin_unlock(&delayed_refs_rsv->lock); ++ ++ spin_lock(&trans_rsv->lock); ++ reclaim_bytes += trans_rsv->reserved; ++ spin_unlock(&trans_rsv->lock); ++ + if (reclaim_bytes >= bytes_needed) + goto commit; + bytes_needed -= reclaim_bytes; +@@ -683,6 +689,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, + !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); + } + ++static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, ++ struct btrfs_space_info *space_info, ++ struct reserve_ticket *ticket) ++{ ++ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; ++ u64 min_bytes; ++ ++ if (global_rsv->space_info != space_info) ++ return false; ++ ++ spin_lock(&global_rsv->lock); ++ min_bytes = div_factor(global_rsv->size, 5); ++ if (global_rsv->reserved < min_bytes + ticket->bytes) { ++ spin_unlock(&global_rsv->lock); ++ return false; ++ } ++ global_rsv->reserved -= ticket->bytes; ++ ticket->bytes = 0; ++ list_del_init(&ticket->list); ++ wake_up(&ticket->wait); ++ space_info->tickets_id++; ++ if (global_rsv->reserved < global_rsv->size) ++ global_rsv->full = 0; ++ spin_unlock(&global_rsv->lock); ++ ++ return 
true; ++} ++ + /* + * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets + * @fs_info - fs_info for this fs +@@ -715,6 +749,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, + ticket = list_first_entry(&space_info->tickets, + struct reserve_ticket, list); + ++ if (ticket->steal && ++ steal_from_global_rsv(fs_info, space_info, ticket)) ++ return true; ++ + /* + * may_commit_transaction will avoid committing the transaction + * if it doesn't feel like the space reclaimed by the commit +@@ -934,6 +972,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, + + switch (flush) { + case BTRFS_RESERVE_FLUSH_ALL: ++ case BTRFS_RESERVE_FLUSH_ALL_STEAL: + wait_reserve_ticket(fs_info, space_info, ticket); + break; + case BTRFS_RESERVE_FLUSH_LIMIT: +@@ -1033,7 +1072,9 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, + ticket.bytes = orig_bytes; + ticket.error = 0; + init_waitqueue_head(&ticket.wait); +- if (flush == BTRFS_RESERVE_FLUSH_ALL) { ++ ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); ++ if (flush == BTRFS_RESERVE_FLUSH_ALL || ++ flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { + list_add_tail(&ticket.list, &space_info->tickets); + if (!space_info->flush) { + space_info->flush = 1; +diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h +index 8867e84aa33d..8b9a1d8fefcb 100644 +--- a/fs/btrfs/space-info.h ++++ b/fs/btrfs/space-info.h +@@ -72,6 +72,7 @@ struct btrfs_space_info { + struct reserve_ticket { + u64 bytes; + int error; ++ bool steal; + struct list_head list; + wait_queue_head_t wait; + }; +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index cdca0f656594..54589e940f9a 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -21,6 +21,7 @@ + #include "dev-replace.h" + #include "qgroup.h" + #include "block-group.h" ++#include "space-info.h" + + #define BTRFS_ROOT_TRANS_TAG 0 + +@@ -451,6 +452,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, + u64 num_bytes = 0; + u64 qgroup_reserved = 0; + bool reloc_reserved = false; ++ bool do_chunk_alloc = false; + int ret; + + /* Send isn't supposed to start transactions. */ +@@ -491,7 +493,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, + * refill that amount for whatever is missing in the reserve. + */ + num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); +- if (delayed_refs_rsv->full == 0) { ++ if (flush == BTRFS_RESERVE_FLUSH_ALL && ++ delayed_refs_rsv->full == 0) { + delayed_refs_bytes = num_bytes; + num_bytes <<= 1; + } +@@ -512,6 +515,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, + delayed_refs_bytes); + num_bytes -= delayed_refs_bytes; + } ++ ++ if (rsv->space_info->force_alloc) ++ do_chunk_alloc = true; + } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && + !delayed_refs_rsv->full) { + /* +@@ -593,6 +599,19 @@ got_it: + if (!current->journal_info) + current->journal_info = h; + ++ /* ++ * If the space_info is marked ALLOC_FORCE then we'll get upgraded to ++ * ALLOC_FORCE the first run through, and then we won't allocate for ++ * anybody else who races in later. We don't care about the return ++ * value here. 
++ */ ++ if (do_chunk_alloc && num_bytes) { ++ u64 flags = h->block_rsv->space_info->flags; ++ ++ btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), ++ CHUNK_ALLOC_NO_FORCE); ++ } ++ + /* + * btrfs_record_root_in_trans() needs to alloc new extents, and may + * call btrfs_join_transaction() while we're also starting a +@@ -627,43 +646,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, + + struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, +- unsigned int num_items, +- int min_factor) ++ unsigned int num_items) + { +- struct btrfs_fs_info *fs_info = root->fs_info; +- struct btrfs_trans_handle *trans; +- u64 num_bytes; +- int ret; +- +- /* +- * We have two callers: unlink and block group removal. The +- * former should succeed even if we will temporarily exceed +- * quota and the latter operates on the extent root so +- * qgroup enforcement is ignored anyway. +- */ +- trans = start_transaction(root, num_items, TRANS_START, +- BTRFS_RESERVE_FLUSH_ALL, false); +- if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) +- return trans; +- +- trans = btrfs_start_transaction(root, 0); +- if (IS_ERR(trans)) +- return trans; +- +- num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); +- ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, +- num_bytes, min_factor); +- if (ret) { +- btrfs_end_transaction(trans); +- return ERR_PTR(ret); +- } +- +- trans->block_rsv = &fs_info->trans_block_rsv; +- trans->bytes_reserved = num_bytes; +- trace_btrfs_space_reservation(fs_info, "transaction", +- trans->transid, num_bytes, 1); +- +- return trans; ++ return start_transaction(root, num_items, TRANS_START, ++ BTRFS_RESERVE_FLUSH_ALL_STEAL, false); + } + + struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h +index 2c5a6f6e5bb0..b15c31d23148 100644 +--- a/fs/btrfs/transaction.h ++++ b/fs/btrfs/transaction.h +@@ -181,8 +181,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, + unsigned int num_items); + struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, +- unsigned int num_items, +- int min_factor); ++ unsigned int num_items); + struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); + struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); + struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 3e64f49c394b..c8b0e5005f02 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -1223,6 +1223,8 @@ again: + &device->dev_state)) { + if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, + &device->dev_state) && ++ !test_bit(BTRFS_DEV_STATE_MISSING, ++ &device->dev_state) && + (!latest_dev || + device->generation > latest_dev->generation)) { + latest_dev = device; +@@ -2769,8 +2771,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path + ret = btrfs_commit_transaction(trans); + } + +- /* Update ctime/mtime for libblkid */ ++ /* ++ * Now that we have written a new super block to this device, check all ++ * other fs_devices list if device_path alienates any other scanned ++ * device. ++ * We can ignore the return value as it typically returns -EINVAL and ++ * only succeeds if the device was an alien. 
++ */ ++ btrfs_forget_devices(device_path); ++ ++ /* Update ctime/mtime for blkid or udev */ + update_dev_time(device_path); ++ + return ret; + + error_sysfs: +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h +index 98bd0e9ee7df..ca78fd709845 100644 +--- a/fs/ext4/ext4_extents.h ++++ b/fs/ext4/ext4_extents.h +@@ -170,10 +170,13 @@ struct partial_cluster { + (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) + #define EXT_LAST_INDEX(__hdr__) \ + (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) +-#define EXT_MAX_EXTENT(__hdr__) \ +- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++#define EXT_MAX_EXTENT(__hdr__) \ ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ ++ : 0) + #define EXT_MAX_INDEX(__hdr__) \ +- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) + + static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) + { +diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c +index 5508baa11bb6..8a28d47bd502 100644 +--- a/fs/ext4/fsync.c ++++ b/fs/ext4/fsync.c +@@ -44,30 +44,28 @@ + */ + static int ext4_sync_parent(struct inode *inode) + { +- struct dentry *dentry = NULL; +- struct inode *next; ++ struct dentry *dentry, *next; + int ret = 0; + + if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) + return 0; +- inode = igrab(inode); ++ dentry = d_find_any_alias(inode); ++ if (!dentry) ++ return 0; + while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); +- dentry = d_find_any_alias(inode); +- if (!dentry) +- break; +- next = igrab(d_inode(dentry->d_parent)); ++ ++ next = dget_parent(dentry); + dput(dentry); +- if (!next) +- break; +- iput(inode); +- inode = next; ++ dentry = next; ++ inode = dentry->d_inode; ++ + /* + * The directory inode may have gone through rmdir by now. But + * the inode itself and its blocks are still allocated (we hold +- * a reference to the inode so it didn't go through +- * ext4_evict_inode()) and so we are safe to flush metadata +- * blocks and the inode. ++ * a reference to the inode via its dentry), so it didn't go ++ * through ext4_evict_inode()) and so we are safe to flush ++ * metadata blocks and the inode. + */ + ret = sync_mapping_buffers(inode->i_mapping); + if (ret) +@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode) + if (ret) + break; + } +- iput(inode); ++ dput(dentry); + return ret; + } + +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 491f9ee4040e..894a61010ae9 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1820,8 +1820,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, + if (EXT4_I(inode)->i_file_acl) { + /* The inode already has an extended attribute block. 
*/ + bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); +- if (IS_ERR(bs->bh)) +- return PTR_ERR(bs->bh); ++ if (IS_ERR(bs->bh)) { ++ error = PTR_ERR(bs->bh); ++ bs->bh = NULL; ++ return error; ++ } + ea_bdebug(bs->bh, "b_count=%d, refcount=%d", + atomic_read(&(bs->bh->b_count)), + le32_to_cpu(BHDR(bs->bh)->h_refcount)); +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 3edde3d6d089..a26ea1e6ba88 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -138,6 +138,7 @@ struct f2fs_mount_info { + int alloc_mode; /* segment allocation policy */ + int fsync_mode; /* fsync policy */ + bool test_dummy_encryption; /* test dummy encryption */ ++ block_t unusable_cap_perc; /* percentage for cap */ + block_t unusable_cap; /* Amount of space allowed to be + * unusable when disabling checkpoint + */ +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 5e1d4d9243a9..e36543c9f2b7 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -277,6 +277,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) + F2FS_OPTION(sbi).s_resgid)); + } + ++static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) ++{ ++ if (!F2FS_OPTION(sbi).unusable_cap_perc) ++ return; ++ ++ if (F2FS_OPTION(sbi).unusable_cap_perc == 100) ++ F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; ++ else ++ F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * ++ F2FS_OPTION(sbi).unusable_cap_perc; ++ ++ f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", ++ F2FS_OPTION(sbi).unusable_cap, ++ F2FS_OPTION(sbi).unusable_cap_perc); ++} ++ + static void init_once(void *foo) + { + struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; +@@ -790,12 +806,7 @@ static int parse_options(struct super_block *sb, char *options) + return -EINVAL; + if (arg < 0 || arg > 100) + return -EINVAL; +- if (arg == 100) +- F2FS_OPTION(sbi).unusable_cap = +- sbi->user_block_count; +- else +- F2FS_OPTION(sbi).unusable_cap = +- (sbi->user_block_count / 100) * arg; ++ F2FS_OPTION(sbi).unusable_cap_perc = arg; + set_opt(sbi, DISABLE_CHECKPOINT); + break; + case Opt_checkpoint_disable_cap: +@@ -1735,6 +1746,7 @@ skip: + (test_opt(sbi, POSIX_ACL) ? 
SB_POSIXACL : 0); + + limit_reserve_root(sbi); ++ adjust_unusable_cap_perc(sbi); + *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); + return 0; + restore_gc: +@@ -3397,6 +3409,7 @@ try_onemore: + sbi->reserved_blocks = 0; + sbi->current_reserved_blocks = 0; + limit_reserve_root(sbi); ++ adjust_unusable_cap_perc(sbi); + + for (i = 0; i < NR_INODE_TYPE; i++) { + INIT_LIST_HEAD(&sbi->inode_list[i]); +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c +index 4f443703065e..0c71acc1b831 100644 +--- a/fs/xfs/xfs_bmap_util.c ++++ b/fs/xfs/xfs_bmap_util.c +@@ -1760,7 +1760,7 @@ xfs_swap_extents( + if (xfs_inode_has_cow_data(tip)) { + error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true); + if (error) +- return error; ++ goto out_unlock; + } + + /* +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index 0abba171aa89..1264ac63e4e5 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -1162,8 +1162,10 @@ xfs_buf_ioend( + bp->b_ops->verify_read(bp); + } + +- if (!bp->b_error) ++ if (!bp->b_error) { ++ bp->b_flags &= ~XBF_WRITE_FAIL; + bp->b_flags |= XBF_DONE; ++ } + + if (bp->b_iodone) + (*(bp->b_iodone))(bp); +@@ -1223,7 +1225,7 @@ xfs_bwrite( + + bp->b_flags |= XBF_WRITE; + bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | +- XBF_WRITE_FAIL | XBF_DONE); ++ XBF_DONE); + + error = xfs_buf_submit(bp); + if (error) +@@ -1929,7 +1931,7 @@ xfs_buf_delwri_submit_buffers( + * synchronously. Otherwise, drop the buffer from the delwri + * queue and submit async. + */ +- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); ++ bp->b_flags &= ~_XBF_DELWRI_Q; + bp->b_flags |= XBF_WRITE; + if (wait_list) { + bp->b_flags &= ~XBF_ASYNC; +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c +index aeb95e7391c1..3cbf248af51f 100644 +--- a/fs/xfs/xfs_dquot.c ++++ b/fs/xfs/xfs_dquot.c +@@ -1116,13 +1116,12 @@ xfs_qm_dqflush( + dqb = bp->b_addr + dqp->q_bufoffset; + ddqp = &dqb->dd_diskdq; + +- /* +- * A simple sanity check in case we got a corrupted dquot. +- */ +- fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0); ++ /* sanity check the in-core structure before we flush */ ++ fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id), ++ 0); + if (fa) { + xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", +- be32_to_cpu(ddqp->d_id), fa); ++ be32_to_cpu(dqp->q_core.d_id), fa); + xfs_buf_relse(bp); + xfs_dqfunlock(dqp); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index b072aeb1fd78..4d6fe87fd38f 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -323,7 +323,7 @@ extern void gdbstub_exit(int status); + extern int kgdb_single_step; + extern atomic_t kgdb_active; + #define in_dbg_master() \ +- (raw_smp_processor_id() == atomic_read(&kgdb_active)) ++ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) + extern bool dbg_is_early; + extern void __init dbg_late_init(void); + extern void kgdb_panic(const char *msg); +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 8b5f758942a2..85804ba62215 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -709,6 +709,8 @@ typedef struct pglist_data { + /* + * Must be held any time you expect node_start_pfn, + * node_present_pages, node_spanned_pages or nr_zones to stay constant. ++ * Also synchronizes pgdat->first_deferred_pfn during deferred page ++ * init. 
+ * + * pgdat_resize_lock() and pgdat_resize_unlock() are provided to + * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 228f66347620..0ad57693f392 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -148,6 +148,8 @@ + + /* Vendors and devices. Sort key: vendor first, device next. */ + ++#define PCI_VENDOR_ID_LOONGSON 0x0014 ++ + #define PCI_VENDOR_ID_TTTECH 0x0357 + #define PCI_DEVICE_ID_TTTECH_MC322 0x000a + +@@ -548,7 +550,9 @@ + #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 + #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb + #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 ++#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b + #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 ++#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 + #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 + #define PCI_DEVICE_ID_AMD_LANCE 0x2000 + #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 +@@ -1829,6 +1833,12 @@ + #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 + #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 + ++#define PCI_VENDOR_ID_PERICOM 0x12D8 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 ++ + #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 + #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 + #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 +@@ -3008,6 +3018,7 @@ + #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea + #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 + #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 ++#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b + #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 + + #define PCI_VENDOR_ID_SCALEMP 0x8686 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h +index c49257a3b510..a132d875d351 100644 +--- a/include/linux/sched/mm.h ++++ b/include/linux/sched/mm.h +@@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm) + __mmdrop(mm); + } + ++void mmdrop(struct mm_struct *mm); ++ + /* + * This has to be called after a get_task_mm()/mmget_not_zero() + * followed by taking the mmap_sem for writing before modifying the +diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h +index a3adbe593505..4bdb5e4bbd6a 100644 +--- a/include/linux/skmsg.h ++++ b/include/linux/skmsg.h +@@ -457,4 +457,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs) + psock_set_prog(&progs->skb_verdict, NULL); + } + ++int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); ++ ++static inline bool sk_psock_strp_enabled(struct sk_psock *psock) ++{ ++ if (!psock) ++ return false; ++ return psock->parser.enabled; ++} + #endif /* _LINUX_SKMSG_H */ +diff --git a/include/linux/string.h b/include/linux/string.h +index b6ccdc2c7f02..b2264355272d 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -269,6 +269,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob + void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); + + #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) ++ ++#ifdef CONFIG_KASAN ++extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); ++extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); ++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); 
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); ++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); ++extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); ++extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); ++extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); ++extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); ++extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); ++#else ++#define __underlying_memchr __builtin_memchr ++#define __underlying_memcmp __builtin_memcmp ++#define __underlying_memcpy __builtin_memcpy ++#define __underlying_memmove __builtin_memmove ++#define __underlying_memset __builtin_memset ++#define __underlying_strcat __builtin_strcat ++#define __underlying_strcpy __builtin_strcpy ++#define __underlying_strlen __builtin_strlen ++#define __underlying_strncat __builtin_strncat ++#define __underlying_strncpy __builtin_strncpy ++#endif ++ + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) + { + size_t p_size = __builtin_object_size(p, 0); +@@ -276,14 +301,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_strncpy(p, q, size); ++ return __underlying_strncpy(p, q, size); + } + + __FORTIFY_INLINE char *strcat(char *p, const char *q) + { + size_t p_size = __builtin_object_size(p, 0); + if (p_size == (size_t)-1) +- return __builtin_strcat(p, q); ++ return __underlying_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +@@ -297,7 +322,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) +- return __builtin_strlen(p); ++ return __underlying_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); +@@ -330,7 +355,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); +- __builtin_memcpy(p, q, len); ++ __underlying_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +@@ -343,12 +368,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) +- return __builtin_strncat(p, q, count); ++ return __underlying_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); +- __builtin_memcpy(p + p_len, q, copy_len); ++ __underlying_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; + } +@@ -360,7 +385,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_memset(p, c, size); ++ return __underlying_memset(p, c, size); + } + + __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +@@ -375,7 +400,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memcpy(p, q, size); ++ 
return __underlying_memcpy(p, q, size); + } + + __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +@@ -390,7 +415,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memmove(p, q, size); ++ return __underlying_memmove(p, q, size); + } + + extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +@@ -416,7 +441,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memcmp(p, q, size); ++ return __underlying_memcmp(p, q, size); + } + + __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +@@ -426,7 +451,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_memchr(p, c, size); ++ return __underlying_memchr(p, c, size); + } + + void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); +@@ -457,11 +482,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) +- return __builtin_strcpy(p, q); ++ return __underlying_strcpy(p, q); + memcpy(p, q, strlen(q) + 1); + return p; + } + ++/* Don't use these outside the FORITFY_SOURCE implementation */ ++#undef __underlying_memchr ++#undef __underlying_memcmp ++#undef __underlying_memcpy ++#undef __underlying_memmove ++#undef __underlying_memset ++#undef __underlying_strcat ++#undef __underlying_strcpy ++#undef __underlying_strlen ++#undef __underlying_strncat ++#undef __underlying_strncpy + #endif + + /** +diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h +index d4326d6662a4..b5a4eb14f809 100644 +--- a/include/linux/sunrpc/gss_api.h ++++ b/include/linux/sunrpc/gss_api.h +@@ -85,6 +85,7 @@ struct pf_desc { + u32 service; + char *name; + char *auth_domain_name; ++ struct auth_domain *domain; + bool datatouch; + }; + +diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h +index a4528b26c8aa..d229d27ab19e 100644 +--- a/include/linux/sunrpc/svcauth_gss.h ++++ b/include/linux/sunrpc/svcauth_gss.h +@@ -21,7 +21,8 @@ int gss_svc_init(void); + void gss_svc_shutdown(void); + int gss_svc_init_net(struct net *net); + void gss_svc_shutdown_net(struct net *net); +-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); ++struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, ++ char *name); + u32 svcauth_gss_flavor(struct auth_domain *dom); + + #endif /* __KERNEL__ */ +diff --git a/include/net/tls.h b/include/net/tls.h +index db26e3ec918f..0a065bdffa39 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -590,6 +590,15 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) + return !!tls_sw_ctx_tx(ctx); + } + ++static inline bool tls_sw_has_ctx_rx(const struct sock *sk) ++{ ++ struct tls_context *ctx = tls_get_ctx(sk); ++ ++ if (!ctx) ++ return false; ++ return !!tls_sw_ctx_rx(ctx); ++} ++ + void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); + void tls_device_write_space(struct sock *sk, struct tls_context *ctx); + +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h +index 52641d8ca9e8..e735bc4075dc 100644 +--- a/include/uapi/linux/kvm.h ++++ 
b/include/uapi/linux/kvm.h +@@ -189,9 +189,11 @@ struct kvm_hyperv_exit { + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/kernel/audit.c b/kernel/audit.c +index fcfbb3476ccd..05ae208ad442 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -879,7 +879,7 @@ main_queue: + return 0; + } + +-int audit_send_list(void *_dest) ++int audit_send_list_thread(void *_dest) + { + struct audit_netlink_list *dest = _dest; + struct sk_buff *skb; +@@ -923,19 +923,30 @@ out_kfree_skb: + return NULL; + } + ++static void audit_free_reply(struct audit_reply *reply) ++{ ++ if (!reply) ++ return; ++ ++ if (reply->skb) ++ kfree_skb(reply->skb); ++ if (reply->net) ++ put_net(reply->net); ++ kfree(reply); ++} ++ + static int audit_send_reply_thread(void *arg) + { + struct audit_reply *reply = (struct audit_reply *)arg; +- struct sock *sk = audit_get_sk(reply->net); + + audit_ctl_lock(); + audit_ctl_unlock(); + + /* Ignore failure. It'll only happen if the sender goes away, + because our timeout is set to infinite. */ +- netlink_unicast(sk, reply->skb, reply->portid, 0); +- put_net(reply->net); +- kfree(reply); ++ netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); ++ reply->skb = NULL; ++ audit_free_reply(reply); + return 0; + } + +@@ -949,35 +960,32 @@ static int audit_send_reply_thread(void *arg) + * @payload: payload data + * @size: payload size + * +- * Allocates an skb, builds the netlink message, and sends it to the port id. +- * No failure notifications. ++ * Allocates a skb, builds the netlink message, and sends it to the port id. + */ + static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, + int multi, const void *payload, int size) + { +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); +- struct sk_buff *skb; + struct task_struct *tsk; +- struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), +- GFP_KERNEL); ++ struct audit_reply *reply; + ++ reply = kzalloc(sizeof(*reply), GFP_KERNEL); + if (!reply) + return; + +- skb = audit_make_reply(seq, type, done, multi, payload, size); +- if (!skb) +- goto out; +- +- reply->net = get_net(net); ++ reply->skb = audit_make_reply(seq, type, done, multi, payload, size); ++ if (!reply->skb) ++ goto err; ++ reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + reply->portid = NETLINK_CB(request_skb).portid; +- reply->skb = skb; + + tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); +- if (!IS_ERR(tsk)) +- return; +- kfree_skb(skb); +-out: +- kfree(reply); ++ if (IS_ERR(tsk)) ++ goto err; ++ ++ return; ++ ++err: ++ audit_free_reply(reply); + } + + /* +diff --git a/kernel/audit.h b/kernel/audit.h +index 6fb7160412d4..ddc22878433d 100644 +--- a/kernel/audit.h ++++ b/kernel/audit.h +@@ -229,7 +229,7 @@ struct audit_netlink_list { + struct sk_buff_head q; + }; + +-int audit_send_list(void *_dest); ++int audit_send_list_thread(void *_dest); + + extern int selinux_audit_rule_update(void); + +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index 026e34da4ace..a10e2997aa6c 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -1161,11 +1161,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) + */ + int audit_list_rules_send(struct sk_buff *request_skb, int seq) + { +- u32 portid = NETLINK_CB(request_skb).portid; +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); + 
struct task_struct *tsk; + struct audit_netlink_list *dest; +- int err = 0; + + /* We can't just spew out the rules here because we might fill + * the available socket buffer space and deadlock waiting for +@@ -1173,25 +1170,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) + * happen if we're actually running in the context of auditctl + * trying to _send_ the stuff */ + +- dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); ++ dest = kmalloc(sizeof(*dest), GFP_KERNEL); + if (!dest) + return -ENOMEM; +- dest->net = get_net(net); +- dest->portid = portid; ++ dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); ++ dest->portid = NETLINK_CB(request_skb).portid; + skb_queue_head_init(&dest->q); + + mutex_lock(&audit_filter_mutex); + audit_list_rules(seq, &dest->q); + mutex_unlock(&audit_filter_mutex); + +- tsk = kthread_run(audit_send_list, dest, "audit_send_list"); ++ tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); + if (IS_ERR(tsk)) { + skb_queue_purge(&dest->q); ++ put_net(dest->net); + kfree(dest); +- err = PTR_ERR(tsk); ++ return PTR_ERR(tsk); + } + +- return err; ++ return 0; + } + + int audit_comparator(u32 left, u32 op, u32 right) +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 946cfdd3b2cc..e7af1ac69d75 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -1118,7 +1118,8 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); +- if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || ++ !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { + err = -EPERM; + goto err_put; + } +diff --git a/kernel/cpu.c b/kernel/cpu.c +index d7890c1285bf..7527825ac7da 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -3,6 +3,7 @@ + * + * This code is licenced under the GPL. + */ ++#include + #include + #include + #include +@@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu) + return bringup_wait_for_ap(cpu); + } + ++static int finish_cpu(unsigned int cpu) ++{ ++ struct task_struct *idle = idle_thread_get(cpu); ++ struct mm_struct *mm = idle->active_mm; ++ ++ /* ++ * idle_task_exit() will have switched to &init_mm, now ++ * clean up any remaining active_mm state. 
++ */ ++ if (mm != &init_mm) ++ idle->active_mm = &init_mm; ++ mmdrop(mm); ++ return 0; ++} ++ + /* + * Hotplug state machine related functions + */ +@@ -1434,7 +1450,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { + [CPUHP_BRINGUP_CPU] = { + .name = "cpu:bringup", + .startup.single = bringup_cpu, +- .teardown.single = NULL, ++ .teardown.single = finish_cpu, + .cant_stop = true, + }, + /* Final state before CPU kills itself */ +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c +index cbca6879ab7d..44a259338e33 100644 +--- a/kernel/cpu_pm.c ++++ b/kernel/cpu_pm.c +@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); + */ + int cpu_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); +@@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); + */ + int cpu_cluster_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index f76d6f77dd5e..7d54c7c28054 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -501,6 +501,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) + + if (exception_level > 1) { + dump_stack(); ++ kgdb_io_module_registered = false; + panic("Recursive entry to debugger"); + } + +@@ -634,6 +635,8 @@ return_normal: + if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) + goto kgdb_restore; + ++ atomic_inc(&ignore_console_lock_warning); ++ + /* Call the I/O driver's pre_exception routine */ + if (dbg_io_ops->pre_exception) + dbg_io_ops->pre_exception(); +@@ -706,6 +709,8 @@ cpu_master_loop: + if (dbg_io_ops->post_exception) + dbg_io_ops->post_exception(); + ++ atomic_dec(&ignore_console_lock_warning); ++ + if (!kgdb_single_step) { + raw_spin_unlock(&dbg_slave_lock); + /* Wait till all the CPUs have quit from the debugger. */ +diff --git a/kernel/exit.c b/kernel/exit.c +index 22dfaac9e48c..fa46977b9c07 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -713,8 +713,12 @@ void __noreturn do_exit(long code) + struct task_struct *tsk = current; + int group_dead; + +- profile_task_exit(tsk); +- kcov_task_exit(tsk); ++ /* ++ * We can get here from a kernel oops, sometimes with preemption off. ++ * Start by checking for critical errors. ++ * Then fix up important state like USER_DS and preemption. ++ * Then do everything else. 
++ */ + + WARN_ON(blk_needs_flush_plug(tsk)); + +@@ -732,6 +736,16 @@ void __noreturn do_exit(long code) + */ + set_fs(USER_DS); + ++ if (unlikely(in_atomic())) { ++ pr_info("note: %s[%d] exited with preempt_count %d\n", ++ current->comm, task_pid_nr(current), ++ preempt_count()); ++ preempt_count_set(PREEMPT_ENABLED); ++ } ++ ++ profile_task_exit(tsk); ++ kcov_task_exit(tsk); ++ + ptrace_event(PTRACE_EVENT_EXIT, code); + + validate_creds_for_do_exit(tsk); +@@ -749,13 +763,6 @@ void __noreturn do_exit(long code) + + exit_signals(tsk); /* sets PF_EXITING */ + +- if (unlikely(in_atomic())) { +- pr_info("note: %s[%d] exited with preempt_count %d\n", +- current->comm, task_pid_nr(current), +- preempt_count()); +- preempt_count_set(PREEMPT_ENABLED); +- } +- + /* sync mm's RSS info before statistics gathering */ + if (tsk->mm) + sync_mm_rss(tsk->mm); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index e99d326fa569..361cbc2dc966 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -6177,13 +6177,14 @@ void idle_task_exit(void) + struct mm_struct *mm = current->active_mm; + + BUG_ON(cpu_online(smp_processor_id())); ++ BUG_ON(current != this_rq()->idle); + + if (mm != &init_mm) { + switch_mm(mm, &init_mm, current); +- current->active_mm = &init_mm; + finish_arch_post_lock_switch(); + } +- mmdrop(mm); ++ ++ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ + } + + /* +@@ -7373,6 +7374,8 @@ static DEFINE_MUTEX(cfs_constraints_mutex); + + const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ + static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ ++/* More than 203 days if BW_SHIFT equals 20. */ ++static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; + + static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); + +@@ -7400,6 +7403,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) + if (period > max_cfs_quota_period) + return -EINVAL; + ++ /* ++ * Bound quota to defend quota against overflow during bandwidth shift. ++ */ ++ if (quota != RUNTIME_INF && quota > max_cfs_runtime) ++ return -EINVAL; ++ + /* + * Prevent race between setting of cfs_rq->runtime_enabled and + * unthrottle_offline_cfs_rqs(). +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 8a0e6bdba50d..2f81e4ae844e 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4942,6 +4942,8 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) + if (!overrun) + break; + ++ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); ++ + if (++count > 3) { + u64 new, old = ktime_to_ns(cfs_b->period); + +@@ -4971,8 +4973,6 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) + /* reset count so we don't come right back in here */ + count = 0; + } +- +- idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); + } + if (idle) + cfs_b->period_active = 0; +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 7bf917e4d63a..5b04bba4500d 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -9,6 +9,8 @@ + + int sched_rr_timeslice = RR_TIMESLICE; + int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; ++/* More than 4 hours if BW_SHIFT equals 20. 
*/ ++static const u64 max_rt_runtime = MAX_BW; + + static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); + +@@ -2513,6 +2515,12 @@ static int tg_set_rt_bandwidth(struct task_group *tg, + if (rt_period == 0) + return -EINVAL; + ++ /* ++ * Bound quota to defend quota against overflow during bandwidth shift. ++ */ ++ if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) ++ return -EINVAL; ++ + mutex_lock(&rt_constraints_mutex); + read_lock(&tasklist_lock); + err = __rt_schedulable(tg, rt_period, rt_runtime); +@@ -2634,7 +2642,9 @@ static int sched_rt_global_validate(void) + return -EINVAL; + + if ((sysctl_sched_rt_runtime != RUNTIME_INF) && +- (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) ++ ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || ++ ((u64)sysctl_sched_rt_runtime * ++ NSEC_PER_USEC > max_rt_runtime))) + return -EINVAL; + + return 0; +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index c7e7481968bf..570659f1c6e2 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1889,6 +1889,8 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); + #define BW_SHIFT 20 + #define BW_UNIT (1 << BW_SHIFT) + #define RATIO_SHIFT 8 ++#define MAX_BW_BITS (64 - BW_SHIFT) ++#define MAX_BW ((1ULL << MAX_BW_BITS) - 1) + unsigned long to_ratio(u64 period, u64 runtime); + + extern void init_entity_runnable_average(struct sched_entity *se); +diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h +index 891e1c3549c4..afbd99987cf8 100644 +--- a/lib/mpi/longlong.h ++++ b/lib/mpi/longlong.h +@@ -653,7 +653,7 @@ do { \ + ************** MIPS/64 ************** + ***************************************/ + #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 +-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 ++#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) + /* + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C + * code below, so we special case MIPS64r6 until the compiler can do better. +diff --git a/lib/test_kasan.c b/lib/test_kasan.c +index bd3d9ef7d39e..83344c9c38f4 100644 +--- a/lib/test_kasan.c ++++ b/lib/test_kasan.c +@@ -22,6 +22,14 @@ + + #include + ++/* ++ * We assign some test results to these globals to make sure the tests ++ * are not eliminated as dead code. ++ */ ++ ++int kasan_int_result; ++void *kasan_ptr_result; ++ + /* + * Note: test functions are marked noinline so that their names appear in + * reports. +@@ -603,7 +611,7 @@ static noinline void __init kasan_memchr(void) + if (!ptr) + return; + +- memchr(ptr, '1', size + 1); ++ kasan_ptr_result = memchr(ptr, '1', size + 1); + kfree(ptr); + } + +@@ -619,7 +627,7 @@ static noinline void __init kasan_memcmp(void) + return; + + memset(arr, 0, sizeof(arr)); +- memcmp(ptr, arr, size+1); ++ kasan_int_result = memcmp(ptr, arr, size + 1); + kfree(ptr); + } + +@@ -642,22 +650,22 @@ static noinline void __init kasan_strings(void) + * will likely point to zeroed byte. 
+ */ + ptr += 16; +- strchr(ptr, '1'); ++ kasan_ptr_result = strchr(ptr, '1'); + + pr_info("use-after-free in strrchr\n"); +- strrchr(ptr, '1'); ++ kasan_ptr_result = strrchr(ptr, '1'); + + pr_info("use-after-free in strcmp\n"); +- strcmp(ptr, "2"); ++ kasan_int_result = strcmp(ptr, "2"); + + pr_info("use-after-free in strncmp\n"); +- strncmp(ptr, "2", 1); ++ kasan_int_result = strncmp(ptr, "2", 1); + + pr_info("use-after-free in strlen\n"); +- strlen(ptr); ++ kasan_int_result = strlen(ptr); + + pr_info("use-after-free in strnlen\n"); +- strnlen(ptr, 1); ++ kasan_int_result = strnlen(ptr, 1); + } + + static noinline void __init kasan_bitops(void) +@@ -724,11 +732,12 @@ static noinline void __init kasan_bitops(void) + __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); + + pr_info("out-of-bounds in test_bit\n"); +- (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); ++ kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); + + #if defined(clear_bit_unlock_is_negative_byte) + pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n"); +- clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits); ++ kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG + ++ BITS_PER_BYTE, bits); + #endif + kfree(bits); + } +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 7ec5710afc99..da9040a6838f 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2301,6 +2301,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + { + spinlock_t *ptl; + struct mmu_notifier_range range; ++ bool was_locked = false; ++ pmd_t _pmd; + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, + address & HPAGE_PMD_MASK, +@@ -2313,11 +2315,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + * pmd against. Otherwise we can end up replacing wrong page. + */ + VM_BUG_ON(freeze && !page); +- if (page && page != pmd_page(*pmd)) +- goto out; ++ if (page) { ++ VM_WARN_ON_ONCE(!PageLocked(page)); ++ was_locked = true; ++ if (page != pmd_page(*pmd)) ++ goto out; ++ } + ++repeat: + if (pmd_trans_huge(*pmd)) { +- page = pmd_page(*pmd); ++ if (!page) { ++ page = pmd_page(*pmd); ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } ++ put_page(page); ++ } ++ } + if (PageMlocked(page)) + clear_page_mlock(page); + } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) +@@ -2325,6 +2348,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + __split_huge_pmd_locked(vma, pmd, range.start, freeze); + out: + spin_unlock(ptl); ++ if (!was_locked && page) ++ unlock_page(page); + /* + * No need to double call mmu_notifier->invalidate_range() callback. 
+ * They are 3 cases to consider inside __split_huge_pmd_locked(): +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 98d5c940facd..8686fe760f34 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1640,7 +1640,6 @@ static void __init deferred_free_pages(unsigned long pfn, + } else if (!(pfn & nr_pgmask)) { + deferred_free_range(pfn - nr_free, nr_free); + nr_free = 1; +- touch_nmi_watchdog(); + } else { + nr_free++; + } +@@ -1670,7 +1669,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone, + continue; + } else if (!page || !(pfn & nr_pgmask)) { + page = pfn_to_page(pfn); +- touch_nmi_watchdog(); + } else { + page++; + } +@@ -1793,6 +1791,13 @@ static int __init deferred_init_memmap(void *data) + BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); + pgdat->first_deferred_pfn = ULONG_MAX; + ++ /* ++ * Once we unlock here, the zone cannot be grown anymore, thus if an ++ * interrupt thread must allocate this early in boot, zone must be ++ * pre-grown prior to start of deferred page initialization. ++ */ ++ pgdat_resize_unlock(pgdat, &flags); ++ + /* Only the highest zone is deferred so find it */ + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + zone = pgdat->node_zones + zid; +@@ -1810,11 +1815,11 @@ static int __init deferred_init_memmap(void *data) + * that we can avoid introducing any issues with the buddy + * allocator. + */ +- while (spfn < epfn) ++ while (spfn < epfn) { + nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); ++ cond_resched(); ++ } + zone_empty: +- pgdat_resize_unlock(pgdat, &flags); +- + /* Sanity check that the next zone really is unpopulated */ + WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); + +@@ -1856,17 +1861,6 @@ deferred_grow_zone(struct zone *zone, unsigned int order) + + pgdat_resize_lock(pgdat, &flags); + +- /* +- * If deferred pages have been initialized while we were waiting for +- * the lock, return true, as the zone was grown. The caller will retry +- * this zone. We won't return to this function since the caller also +- * has this static branch. +- */ +- if (!static_branch_unlikely(&deferred_pages)) { +- pgdat_resize_unlock(pgdat, &flags); +- return true; +- } +- + /* + * If someone grew this zone while we were waiting for spinlock, return + * true, as there might be enough pages already. +@@ -1895,6 +1889,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order) + first_deferred_pfn = spfn; + + nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); ++ touch_nmi_watchdog(); + + /* We should only stop along section boundaries */ + if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) +diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c +index 2614a9caee00..a39af0eefad3 100644 +--- a/net/batman-adv/bat_v_elp.c ++++ b/net/batman-adv/bat_v_elp.c +@@ -120,20 +120,7 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) + rtnl_lock(); + ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings); + rtnl_unlock(); +- +- /* Virtual interface drivers such as tun / tap interfaces, VLAN, etc +- * tend to initialize the interface throughput with some value for the +- * sake of having a throughput number to export via ethtool. This +- * exported throughput leaves batman-adv to conclude the interface +- * throughput is genuine (reflecting reality), thus no measurements +- * are necessary. 
+- * +- * Based on the observation that those interface types also tend to set +- * the link auto-negotiation to 'off', batman-adv shall check this +- * setting to differentiate between genuine link throughput information +- * and placeholders installed by virtual interfaces. +- */ +- if (ret == 0 && link_settings.base.autoneg == AUTONEG_ENABLE) { ++ if (ret == 0) { + /* link characteristics might change over time */ + if (link_settings.base.duplex == DUPLEX_FULL) + hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX; +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index c1d3a303d97f..88cd410e5728 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -4216,6 +4216,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ ++ case 0x1e: /* Invalid LMP Parameters */ + case 0x1f: /* Unspecified error */ + case 0x20: /* Unsupported LMP Parameter value */ + if (conn->out) { +diff --git a/net/core/skmsg.c b/net/core/skmsg.c +index ded2d5227678..0536ea9298e4 100644 +--- a/net/core/skmsg.c ++++ b/net/core/skmsg.c +@@ -7,6 +7,7 @@ + + #include + #include ++#include + + static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce) + { +@@ -686,13 +687,75 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp) + return container_of(parser, struct sk_psock, parser); + } + +-static void sk_psock_verdict_apply(struct sk_psock *psock, +- struct sk_buff *skb, int verdict) ++static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb) + { + struct sk_psock *psock_other; + struct sock *sk_other; + bool ingress; + ++ sk_other = tcp_skb_bpf_redirect_fetch(skb); ++ if (unlikely(!sk_other)) { ++ kfree_skb(skb); ++ return; ++ } ++ psock_other = sk_psock(sk_other); ++ if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || ++ !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) { ++ kfree_skb(skb); ++ return; ++ } ++ ++ ingress = tcp_skb_bpf_ingress(skb); ++ if ((!ingress && sock_writeable(sk_other)) || ++ (ingress && ++ atomic_read(&sk_other->sk_rmem_alloc) <= ++ sk_other->sk_rcvbuf)) { ++ if (!ingress) ++ skb_set_owner_w(skb, sk_other); ++ skb_queue_tail(&psock_other->ingress_skb, skb); ++ schedule_work(&psock_other->work); ++ } else { ++ kfree_skb(skb); ++ } ++} ++ ++static void sk_psock_tls_verdict_apply(struct sk_psock *psock, ++ struct sk_buff *skb, int verdict) ++{ ++ switch (verdict) { ++ case __SK_REDIRECT: ++ sk_psock_skb_redirect(psock, skb); ++ break; ++ case __SK_PASS: ++ case __SK_DROP: ++ default: ++ break; ++ } ++} ++ ++int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) ++{ ++ struct bpf_prog *prog; ++ int ret = __SK_PASS; ++ ++ rcu_read_lock(); ++ prog = READ_ONCE(psock->progs.skb_verdict); ++ if (likely(prog)) { ++ tcp_skb_bpf_redirect_clear(skb); ++ ret = sk_psock_bpf_run(psock, prog, skb); ++ ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); ++ } ++ rcu_read_unlock(); ++ sk_psock_tls_verdict_apply(psock, skb, ret); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); ++ ++static void sk_psock_verdict_apply(struct sk_psock *psock, ++ struct sk_buff *skb, int verdict) ++{ ++ struct sock *sk_other; ++ + switch (verdict) { + case __SK_PASS: + sk_other = psock->sk; +@@ -711,25 +774,8 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, + } + goto out_free; + case __SK_REDIRECT: +- sk_other = 
tcp_skb_bpf_redirect_fetch(skb); +- if (unlikely(!sk_other)) +- goto out_free; +- psock_other = sk_psock(sk_other); +- if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || +- !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) +- goto out_free; +- ingress = tcp_skb_bpf_ingress(skb); +- if ((!ingress && sock_writeable(sk_other)) || +- (ingress && +- atomic_read(&sk_other->sk_rmem_alloc) <= +- sk_other->sk_rcvbuf)) { +- if (!ingress) +- skb_set_owner_w(skb, sk_other); +- skb_queue_tail(&psock_other->ingress_skb, skb); +- schedule_work(&psock_other->work); +- break; +- } +- /* fall-through */ ++ sk_psock_skb_redirect(psock, skb); ++ break; + case __SK_DROP: + /* fall-through */ + default: +@@ -783,9 +829,13 @@ static void sk_psock_strp_data_ready(struct sock *sk) + rcu_read_lock(); + psock = sk_psock(sk); + if (likely(psock)) { +- write_lock_bh(&sk->sk_callback_lock); +- strp_data_ready(&psock->parser.strp); +- write_unlock_bh(&sk->sk_callback_lock); ++ if (tls_sw_has_ctx_rx(sk)) { ++ psock->parser.saved_data_ready(sk); ++ } else { ++ write_lock_bh(&sk->sk_callback_lock); ++ strp_data_ready(&psock->parser.strp); ++ write_unlock_bh(&sk->sk_callback_lock); ++ } + } + rcu_read_unlock(); + } +diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c +index c3c93e95b46e..243e8107f456 100644 +--- a/net/netfilter/nft_nat.c ++++ b/net/netfilter/nft_nat.c +@@ -129,7 +129,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + priv->type = NF_NAT_MANIP_DST; + break; + default: +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + if (tb[NFTA_NAT_FAMILY] == NULL) +@@ -196,7 +196,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + return nf_ct_netns_get(ctx->net, family); +diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c +index 8fa924c8e282..9314999bf095 100644 +--- a/net/sunrpc/auth_gss/gss_mech_switch.c ++++ b/net/sunrpc/auth_gss/gss_mech_switch.c +@@ -36,6 +36,8 @@ gss_mech_free(struct gss_api_mech *gm) + + for (i = 0; i < gm->gm_pf_num; i++) { + pf = &gm->gm_pfs[i]; ++ if (pf->domain) ++ auth_domain_put(pf->domain); + kfree(pf->auth_domain_name); + pf->auth_domain_name = NULL; + } +@@ -58,6 +60,7 @@ make_auth_domain_name(char *name) + static int + gss_mech_svc_setup(struct gss_api_mech *gm) + { ++ struct auth_domain *dom; + struct pf_desc *pf; + int i, status; + +@@ -67,10 +70,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) + status = -ENOMEM; + if (pf->auth_domain_name == NULL) + goto out; +- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, +- pf->auth_domain_name); +- if (status) ++ dom = svcauth_gss_register_pseudoflavor( ++ pf->pseudoflavor, pf->auth_domain_name); ++ if (IS_ERR(dom)) { ++ status = PTR_ERR(dom); + goto out; ++ } ++ pf->domain = dom; + } + return 0; + out: +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index d9f7439e2431..fd91274e834d 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -800,7 +800,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) + + EXPORT_SYMBOL_GPL(svcauth_gss_flavor); + +-int ++struct auth_domain * + svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + { + struct gss_domain *new; +@@ -817,21 +817,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + 
new->h.flavour = &svcauthops_gss; + new->pseudoflavor = pseudoflavor; + +- stat = 0; + test = auth_domain_lookup(name, &new->h); +- if (test != &new->h) { /* Duplicate registration */ ++ if (test != &new->h) { ++ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", ++ name); ++ stat = -EADDRINUSE; + auth_domain_put(test); +- kfree(new->h.name); +- goto out_free_dom; ++ goto out_free_name; + } +- return 0; ++ return test; + ++out_free_name: ++ kfree(new->h.name); + out_free_dom: + kfree(new); + out: +- return stat; ++ return ERR_PTR(stat); + } +- + EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); + + static inline int +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index fbf6a496ee8b..70b203e5d5fd 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -1737,6 +1737,7 @@ int tls_sw_recvmsg(struct sock *sk, + long timeo; + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); + bool is_peek = flags & MSG_PEEK; ++ bool bpf_strp_enabled; + int num_async = 0; + int pending; + +@@ -1747,6 +1748,7 @@ int tls_sw_recvmsg(struct sock *sk, + + psock = sk_psock_get(sk); + lock_sock(sk); ++ bpf_strp_enabled = sk_psock_strp_enabled(psock); + + /* Process pending decrypted records. It must be non-zero-copy */ + err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false, +@@ -1800,11 +1802,12 @@ int tls_sw_recvmsg(struct sock *sk, + + if (to_decrypt <= len && !is_kvec && !is_peek && + ctx->control == TLS_RECORD_TYPE_DATA && +- prot->version != TLS_1_3_VERSION) ++ prot->version != TLS_1_3_VERSION && ++ !bpf_strp_enabled) + zc = true; + + /* Do not use async mode if record is non-data */ +- if (ctx->control == TLS_RECORD_TYPE_DATA) ++ if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) + async_capable = ctx->async_capable; + else + async_capable = false; +@@ -1854,6 +1857,19 @@ int tls_sw_recvmsg(struct sock *sk, + goto pick_next_record; + + if (!zc) { ++ if (bpf_strp_enabled) { ++ err = sk_psock_tls_strp_read(psock, skb); ++ if (err != __SK_PASS) { ++ rxm->offset = rxm->offset + rxm->full_len; ++ rxm->full_len = 0; ++ if (err == __SK_DROP) ++ consume_skb(skb); ++ ctx->recv_pkt = NULL; ++ __strp_unpause(&ctx->strp); ++ continue; ++ } ++ } ++ + if (rxm->full_len > len) { + retain_skb = true; + chunk = len; +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index fbc2ee6d46fc..ee6bd945f3d6 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -243,7 +243,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + + /* Portable EVM signatures must include an IMA hash */ + if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) +- return -EPERM; ++ error = -EPERM; + out: + kfree(xattr_value); + kfree(desc); +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index 3689081aaf38..be469fce19e1 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; + #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE + #define IMA_EVENT_NAME_LEN_MAX 255 + +-#define IMA_HASH_BITS 9 ++#define IMA_HASH_BITS 10 + #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) + + #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +@@ -52,6 +52,7 @@ extern int ima_policy_flag; + extern int ima_hash_algo; + extern int ima_appraise; + extern struct tpm_chip *ima_tpm_chip; ++extern const char boot_aggregate_name[]; + + /* IMA event related data */ + struct ima_event_data { +@@ -140,7 +141,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t 
len, + int ima_calc_field_array_hash(struct ima_field_data *field_data, + struct ima_template_desc *desc, int num_fields, + struct ima_digest_data *hash); +-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); ++int ima_calc_boot_aggregate(struct ima_digest_data *hash); + void ima_add_violation(struct file *file, const unsigned char *filename, + struct integrity_iint_cache *iint, + const char *op, const char *cause); +@@ -175,9 +176,10 @@ struct ima_h_table { + }; + extern struct ima_h_table ima_htable; + +-static inline unsigned long ima_hash_key(u8 *digest) ++static inline unsigned int ima_hash_key(u8 *digest) + { +- return hash_long(*digest, IMA_HASH_BITS); ++ /* there is no point in taking a hash of part of a digest */ ++ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; + } + + #define __ima_hooks(hook) \ +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index ad6cbbccc8d9..d5ad7b2539c7 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -645,7 +645,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len, + return calc_buffer_shash(buf, len, hash); + } + +-static void __init ima_pcrread(u32 idx, struct tpm_digest *d) ++static void ima_pcrread(u32 idx, struct tpm_digest *d) + { + if (!ima_tpm_chip) + return; +@@ -655,18 +655,29 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d) + } + + /* +- * Calculate the boot aggregate hash ++ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With ++ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with ++ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, ++ * allowing firmware to configure and enable different banks. ++ * ++ * Knowing which TPM bank is read to calculate the boot_aggregate digest ++ * needs to be conveyed to a verifier. For this reason, use the same ++ * hash algorithm for reading the TPM PCRs as for calculating the boot ++ * aggregate digest as stored in the measurement list. 
+ */ +-static int __init ima_calc_boot_aggregate_tfm(char *digest, +- struct crypto_shash *tfm) ++static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, ++ struct crypto_shash *tfm) + { +- struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} }; ++ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; + int rc; + u32 i; + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + ++ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n", ++ d.alg_id); ++ + rc = crypto_shash_init(shash); + if (rc != 0) + return rc; +@@ -675,24 +686,48 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest, + for (i = TPM_PCR0; i < TPM_PCR8; i++) { + ima_pcrread(i, &d); + /* now accumulate with current aggregate */ +- rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE); ++ rc = crypto_shash_update(shash, d.digest, ++ crypto_shash_digestsize(tfm)); + } + if (!rc) + crypto_shash_final(shash, digest); + return rc; + } + +-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) ++int ima_calc_boot_aggregate(struct ima_digest_data *hash) + { + struct crypto_shash *tfm; +- int rc; ++ u16 crypto_id, alg_id; ++ int rc, i, bank_idx = -1; ++ ++ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { ++ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; ++ if (crypto_id == hash->algo) { ++ bank_idx = i; ++ break; ++ } ++ ++ if (crypto_id == HASH_ALGO_SHA256) ++ bank_idx = i; ++ ++ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) ++ bank_idx = i; ++ } ++ ++ if (bank_idx == -1) { ++ pr_err("No suitable TPM algorithm for boot aggregate\n"); ++ return 0; ++ } ++ ++ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + hash->length = crypto_shash_digestsize(tfm); +- rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); ++ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; ++ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm); + + ima_free_tfm(tfm); + +diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c +index 5d55ade5f3b9..a94177042eaa 100644 +--- a/security/integrity/ima/ima_init.c ++++ b/security/integrity/ima/ima_init.c +@@ -21,13 +21,13 @@ + #include "ima.h" + + /* name for boot aggregate entry */ +-static const char boot_aggregate_name[] = "boot_aggregate"; ++const char boot_aggregate_name[] = "boot_aggregate"; + struct tpm_chip *ima_tpm_chip; + + /* Add the boot aggregate to the IMA measurement list and extend + * the PCR register. + * +- * Calculate the boot aggregate, a SHA1 over tpm registers 0-7, ++ * Calculate the boot aggregate, a hash over tpm registers 0-7, + * assuming a TPM chip exists, and zeroes if the TPM chip does not + * exist. Add the boot aggregate measurement to the measurement + * list and extend the PCR register. +@@ -51,15 +51,27 @@ static int __init ima_add_boot_aggregate(void) + int violation = 0; + struct { + struct ima_digest_data hdr; +- char digest[TPM_DIGEST_SIZE]; ++ char digest[TPM_MAX_DIGEST_SIZE]; + } hash; + + memset(iint, 0, sizeof(*iint)); + memset(&hash, 0, sizeof(hash)); + iint->ima_hash = &hash.hdr; +- iint->ima_hash->algo = HASH_ALGO_SHA1; +- iint->ima_hash->length = SHA1_DIGEST_SIZE; +- ++ iint->ima_hash->algo = ima_hash_algo; ++ iint->ima_hash->length = hash_digest_size[ima_hash_algo]; ++ ++ /* ++ * With TPM 2.0 hash agility, TPM chips could support multiple TPM ++ * PCR banks, allowing firmware to configure and enable different ++ * banks. 
The SHA1 bank is not necessarily enabled. ++ * ++ * Use the same hash algorithm for reading the TPM PCRs as for ++ * calculating the boot aggregate digest. Preference is given to ++ * the configured IMA default hash algorithm. Otherwise, use the ++ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2. ++ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank ++ * is not found. ++ */ + if (ima_tpm_chip) { + result = ima_calc_boot_aggregate(&hash.hdr); + if (result < 0) { +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 60027c643ecd..a768f37a0a4d 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -712,6 +712,9 @@ static int __init init_ima(void) + error = ima_init(); + } + ++ if (error) ++ return error; ++ + error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier); + if (error) + pr_warn("Couldn't register LSM notifier, error %d\n", error); +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c +index ee9aec5e98f0..558a7607bf93 100644 +--- a/security/integrity/ima/ima_policy.c ++++ b/security/integrity/ima/ima_policy.c +@@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init; + static LIST_HEAD(ima_default_rules); + static LIST_HEAD(ima_policy_rules); + static LIST_HEAD(ima_temp_rules); +-static struct list_head *ima_rules; ++static struct list_head *ima_rules = &ima_default_rules; + + static int ima_policy __initdata; + +@@ -591,9 +591,12 @@ static void add_rules(struct ima_rule_entry *entries, int count, + list_add_tail(&entry->list, &ima_policy_rules); + } + if (entries[i].action == APPRAISE) { +- temp_ima_appraise |= ima_appraise_flag(entries[i].func); +- if (entries[i].func == POLICY_CHECK) +- temp_ima_appraise |= IMA_APPRAISE_POLICY; ++ if (entries != build_appraise_rules) ++ temp_ima_appraise |= ++ ima_appraise_flag(entries[i].func); ++ else ++ build_ima_appraise |= ++ ima_appraise_flag(entries[i].func); + } + } + } +@@ -712,7 +715,6 @@ void __init ima_init_policy(void) + ARRAY_SIZE(default_appraise_rules), + IMA_DEFAULT_POLICY); + +- ima_rules = &ima_default_rules; + ima_update_policy_flag(); + } + +diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c +index 32ae05d88257..1be146e17d9f 100644 +--- a/security/integrity/ima/ima_template_lib.c ++++ b/security/integrity/ima/ima_template_lib.c +@@ -288,6 +288,24 @@ int ima_eventdigest_init(struct ima_event_data *event_data, + goto out; + } + ++ if ((const char *)event_data->filename == boot_aggregate_name) { ++ if (ima_tpm_chip) { ++ hash.hdr.algo = HASH_ALGO_SHA1; ++ result = ima_calc_boot_aggregate(&hash.hdr); ++ ++ /* algo can change depending on available PCR banks */ ++ if (!result && hash.hdr.algo != HASH_ALGO_SHA1) ++ result = -EINVAL; ++ ++ if (result < 0) ++ memset(&hash, 0, sizeof(hash)); ++ } ++ ++ cur_digest = hash.hdr.digest; ++ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1]; ++ goto out; ++ } ++ + if (!event_data->file) /* missing info to re-calculate the digest */ + return -EINVAL; + +diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c +index b2f87015d6e9..3f38583bed06 100644 +--- a/security/lockdown/lockdown.c ++++ b/security/lockdown/lockdown.c +@@ -177,7 +177,7 @@ static int __init lockdown_secfs_init(void) + { + struct dentry *dentry; + +- dentry = securityfs_create_file("lockdown", 0600, NULL, NULL, ++ dentry = securityfs_create_file("lockdown", 0644, NULL, NULL, + &lockdown_ops); + 
return PTR_ERR_OR_ZERO(dentry); + } +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c +index 1260f5fb766e..dd7aabd94a92 100644 +--- a/security/selinux/ss/policydb.c ++++ b/security/selinux/ss/policydb.c +@@ -2496,6 +2496,7 @@ int policydb_read(struct policydb *p, void *fp) + if (rc) + goto bad; + ++ rc = -ENOMEM; + p->type_attr_map_array = kvcalloc(p->p_types.nprim, + sizeof(*p->type_attr_map_array), + GFP_KERNEL); +diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py +index 7e344a78a627..b8c082c9fd7d 100644 +--- a/tools/cgroup/iocost_monitor.py ++++ b/tools/cgroup/iocost_monitor.py +@@ -112,14 +112,14 @@ class IocStat: + + def dict(self, now): + return { 'device' : devname, +- 'timestamp' : str(now), +- 'enabled' : str(int(self.enabled)), +- 'running' : str(int(self.running)), +- 'period_ms' : str(self.period_ms), +- 'period_at' : str(self.period_at), +- 'period_vtime_at' : str(self.vperiod_at), +- 'busy_level' : str(self.busy_level), +- 'vrate_pct' : str(self.vrate_pct), } ++ 'timestamp' : now, ++ 'enabled' : self.enabled, ++ 'running' : self.running, ++ 'period_ms' : self.period_ms, ++ 'period_at' : self.period_at, ++ 'period_vtime_at' : self.vperiod_at, ++ 'busy_level' : self.busy_level, ++ 'vrate_pct' : self.vrate_pct, } + + def table_preamble_str(self): + state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF' +@@ -179,19 +179,19 @@ class IocgStat: + + def dict(self, now, path): + out = { 'cgroup' : path, +- 'timestamp' : str(now), +- 'is_active' : str(int(self.is_active)), +- 'weight' : str(self.weight), +- 'weight_active' : str(self.active), +- 'weight_inuse' : str(self.inuse), +- 'hweight_active_pct' : str(self.hwa_pct), +- 'hweight_inuse_pct' : str(self.hwi_pct), +- 'inflight_pct' : str(self.inflight_pct), +- 'debt_ms' : str(self.debt_ms), +- 'use_delay' : str(self.use_delay), +- 'delay_ms' : str(self.delay_ms), +- 'usage_pct' : str(self.usage), +- 'address' : str(hex(self.address)) } ++ 'timestamp' : now, ++ 'is_active' : self.is_active, ++ 'weight' : self.weight, ++ 'weight_active' : self.active, ++ 'weight_inuse' : self.inuse, ++ 'hweight_active_pct' : self.hwa_pct, ++ 'hweight_inuse_pct' : self.hwi_pct, ++ 'inflight_pct' : self.inflight_pct, ++ 'debt_ms' : self.debt_ms, ++ 'use_delay' : self.use_delay, ++ 'delay_ms' : self.delay_ms, ++ 'usage_pct' : self.usage, ++ 'address' : self.address } + for i in range(len(self.usages)): + out[f'usage_pct_{i}'] = str(self.usages[i]) + return out +diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c +index bd021a0eeef8..4cc69675c2a9 100644 +--- a/tools/lib/api/fs/fs.c ++++ b/tools/lib/api/fs/fs.c +@@ -90,6 +90,7 @@ struct fs { + const char * const *mounts; + char path[PATH_MAX]; + bool found; ++ bool checked; + long magic; + }; + +@@ -111,31 +112,37 @@ static struct fs fs__entries[] = { + .name = "sysfs", + .mounts = sysfs__fs_known_mountpoints, + .magic = SYSFS_MAGIC, ++ .checked = false, + }, + [FS__PROCFS] = { + .name = "proc", + .mounts = procfs__known_mountpoints, + .magic = PROC_SUPER_MAGIC, ++ .checked = false, + }, + [FS__DEBUGFS] = { + .name = "debugfs", + .mounts = debugfs__known_mountpoints, + .magic = DEBUGFS_MAGIC, ++ .checked = false, + }, + [FS__TRACEFS] = { + .name = "tracefs", + .mounts = tracefs__known_mountpoints, + .magic = TRACEFS_MAGIC, ++ .checked = false, + }, + [FS__HUGETLBFS] = { + .name = "hugetlbfs", + .mounts = hugetlbfs__known_mountpoints, + .magic = HUGETLBFS_MAGIC, ++ .checked = false, + }, + [FS__BPF_FS] = { + .name = "bpf", + .mounts 
= bpf_fs__known_mountpoints, + .magic = BPF_FS_MAGIC, ++ .checked = false, + }, + }; + +@@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs) + } + + fclose(fp); ++ fs->checked = true; + return fs->found = found; + } + +@@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs) + return false; + + fs->found = true; ++ fs->checked = true; + strncpy(fs->path, override_path, sizeof(fs->path) - 1); + fs->path[sizeof(fs->path) - 1] = '\0'; + return true; +@@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx) + if (fs->found) + return (const char *)fs->path; + ++ /* the mount point was already checked for the mount point ++ * but and did not exist, so return NULL to avoid scanning again. ++ * This makes the found and not found paths cost equivalent ++ * in case of multiple calls. ++ */ ++ if (fs->checked) ++ return NULL; ++ + return fs__get_mountpoint(fs); + } + +diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h +index 92d03b8396b1..3b70003e7cfb 100644 +--- a/tools/lib/api/fs/fs.h ++++ b/tools/lib/api/fs/fs.h +@@ -18,6 +18,18 @@ + const char *name##__mount(void); \ + bool name##__configured(void); \ + ++/* ++ * The xxxx__mountpoint() entry points find the first match mount point for each ++ * filesystems listed below, where xxxx is the filesystem type. ++ * ++ * The interface is as follows: ++ * ++ * - If a mount point is found on first call, it is cached and used for all ++ * subsequent calls. ++ * ++ * - If a mount point is not found, NULL is returned on first call and all ++ * subsequent calls. ++ */ + FS(sysfs) + FS(procfs) + FS(debugfs) +diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c +index 6122272943e6..9ef9f6201d8b 100644 +--- a/tools/lib/bpf/hashmap.c ++++ b/tools/lib/bpf/hashmap.c +@@ -56,7 +56,14 @@ struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, + + void hashmap__clear(struct hashmap *map) + { ++ struct hashmap_entry *cur, *tmp; ++ int bkt; ++ ++ hashmap__for_each_entry_safe(map, cur, tmp, bkt) { ++ free(cur); ++ } + free(map->buckets); ++ map->buckets = NULL; + map->cap = map->cap_bits = map->sz = 0; + } + +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index 281cc65276e0..2a1dbf52fc9a 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -5358,9 +5358,12 @@ void perf_buffer__free(struct perf_buffer *pb) + if (!pb) + return; + if (pb->cpu_bufs) { +- for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) { ++ for (i = 0; i < pb->cpu_cnt; i++) { + struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + ++ if (!cpu_buf) ++ continue; ++ + bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); + perf_buffer__free_cpu_buf(pb, cpu_buf); + } +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index fcc6cd404f56..48b234d8f251 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -865,6 +865,12 @@ static int add_special_section_alts(struct objtool_file *file) + } + + if (special_alt->group) { ++ if (!special_alt->orig_len) { ++ WARN_FUNC("empty alternative entry", ++ orig_insn->sec, orig_insn->offset); ++ continue; ++ } ++ + ret = handle_group_alt(file, special_alt, orig_insn, + &new_insn); + if (ret) +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c +index 26bc5923e6b5..2f05f59e9758 100644 +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -364,6 +364,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) + + for (k = 0; k < pev->ntevs; k++) { + struct probe_trace_event *tev = &pev->tevs[k]; ++ /* Skipped events have no event 
name */ ++ if (!tev->event) ++ continue; + + /* We use tev's name for showing new events */ + show_perf_probe_event(tev->group, tev->event, pev, +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c +index e11ddf86f2b3..ab2e130dc07a 100644 +--- a/tools/perf/util/dso.c ++++ b/tools/perf/util/dso.c +@@ -47,6 +47,7 @@ char dso__symtab_origin(const struct dso *dso) + [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', ++ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', + [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', +@@ -129,6 +130,21 @@ int dso__read_binary_type_filename(const struct dso *dso, + snprintf(filename + len, size - len, "%s", dso->long_name); + break; + ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: ++ /* ++ * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in ++ * /usr/lib/debug/lib when it is expected to be in ++ * /usr/lib/debug/usr/lib ++ */ ++ if (strlen(dso->long_name) < 9 || ++ strncmp(dso->long_name, "/usr/lib/", 9)) { ++ ret = -1; ++ break; ++ } ++ len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); ++ snprintf(filename + len, size - len, "%s", dso->long_name + 4); ++ break; ++ + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + { + const char *last_slash; +diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h +index e4dddb76770d..69bb77d19164 100644 +--- a/tools/perf/util/dso.h ++++ b/tools/perf/util/dso.h +@@ -30,6 +30,7 @@ enum dso_binary_type { + DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index 92b07be0b48b..a5cb1a3a1064 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -102,7 +102,7 @@ void exit_probe_symbol_maps(void) + symbol__exit(); + } + +-static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) ++static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap) + { + /* kmap->ref_reloc_sym should be set if host_machine is initialized */ + struct kmap *kmap; +@@ -114,6 +114,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) + kmap = map__kmap(map); + if (!kmap) + return NULL; ++ ++ if (pmap) ++ *pmap = map; ++ + return kmap->ref_reloc_sym; + } + +@@ -125,7 +129,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, + struct map *map; + + /* ref_reloc_sym is just a label. Need a special fix*/ +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (reloc_sym && strcmp(name, reloc_sym->name) == 0) + *addr = (reloc) ? 
reloc_sym->addr : reloc_sym->unrelocated_addr; + else { +@@ -232,21 +236,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) + static bool kprobe_blacklist__listed(unsigned long address); + static bool kprobe_warn_out_range(const char *symbol, unsigned long address) + { +- u64 etext_addr = 0; +- int ret; +- +- /* Get the address of _etext for checking non-probable text symbol */ +- ret = kernel_get_symbol_address_by_name("_etext", &etext_addr, +- false, false); ++ struct map *map; ++ bool ret = false; + +- if (ret == 0 && etext_addr < address) +- pr_warning("%s is out of .text, skip it.\n", symbol); +- else if (kprobe_blacklist__listed(address)) ++ map = kernel_get_module_map(NULL); ++ if (map) { ++ ret = address <= map->start || map->end < address; ++ if (ret) ++ pr_warning("%s is out of .text, skip it.\n", symbol); ++ map__put(map); ++ } ++ if (!ret && kprobe_blacklist__listed(address)) { + pr_warning("%s is blacklisted function, skip it.\n", symbol); +- else +- return false; ++ ret = true; ++ } + +- return true; ++ return ret; + } + + /* +@@ -745,6 +750,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + int ntevs) + { + struct ref_reloc_sym *reloc_sym; ++ struct map *map; + char *tmp; + int i, skipped = 0; + +@@ -753,7 +759,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + return post_process_offline_probe_trace_events(tevs, ntevs, + symbol_conf.vmlinux_name); + +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(&map); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + return -EINVAL; +@@ -764,9 +770,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + continue; + if (tevs[i].point.retprobe && !kretprobe_offset_is_supported()) + continue; +- /* If we found a wrong one, mark it by NULL symbol */ ++ /* ++ * If we found a wrong one, mark it by NULL symbol. ++ * Since addresses in debuginfo is same as objdump, we need ++ * to convert it to addresses on memory. 
++ */ + if (kprobe_warn_out_range(tevs[i].point.symbol, +- tevs[i].point.address)) { ++ map__objdump_2mem(map, tevs[i].point.address))) { + tmp = NULL; + skipped++; + } else { +@@ -2922,7 +2932,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + /* Note that the symbols in the kmodule are not relocated */ + if (!pev->uprobes && !pev->target && + (!pp->retprobe || kretprobe_offset_is_supported())) { +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + ret = -EINVAL; +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c +index aaf3b24fffa4..dc9d495e3d6a 100644 +--- a/tools/perf/util/probe-finder.c ++++ b/tools/perf/util/probe-finder.c +@@ -101,6 +101,7 @@ enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index a8f80e427674..901ad7f6f4dc 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -79,6 +79,7 @@ static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +@@ -1220,6 +1221,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) + + m->end = old_map->start; + list_add_tail(&m->node, &merged); ++ new_map->pgoff += old_map->end - new_map->start; + new_map->start = old_map->end; + } + } else { +@@ -1240,6 +1242,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) + * |new......| -> |new...| + * |old....| -> |old....| + */ ++ new_map->pgoff += old_map->end - new_map->start; + new_map->start = old_map->end; + } + } +@@ -1530,6 +1533,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; +diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config +index 5dc109f4c097..b9601f13cf03 100644 +--- a/tools/testing/selftests/bpf/config ++++ b/tools/testing/selftests/bpf/config +@@ -25,6 +25,7 @@ CONFIG_XDP_SOCKETS=y + CONFIG_FTRACE_SYSCALLS=y + CONFIG_IPV6_TUNNEL=y + CONFIG_IPV6_GRE=y ++CONFIG_IPV6_SEG6_BPF=y + CONFIG_NET_FOU=m + CONFIG_NET_FOU_IP_TUNNELS=y + CONFIG_IPV6_FOU=m +diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +index 92563898867c..9f3634c9971d 100644 +--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c ++++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +@@ -523,6 +523,7 @@ void test_flow_dissector(void) + CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); + } + ++ close(tap_fd); + bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); + bpf_object__close(obj); + } +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c +index 3bf18364c67c..8cb3469dd11f 100644 +--- 
a/tools/testing/selftests/bpf/test_progs.c ++++ b/tools/testing/selftests/bpf/test_progs.c +@@ -293,6 +293,7 @@ int extract_build_id(char *build_id, size_t size) + len = size; + memcpy(build_id, line, len); + build_id[len] = '\0'; ++ free(line); + return 0; + err: + fclose(fp); diff --git a/patch/kernel/odroidxu4-current/patch-5.4.48-49.patch b/patch/kernel/odroidxu4-current/patch-5.4.48-49.patch new file mode 100644 index 000000000..29d865f52 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.48-49.patch @@ -0,0 +1,11998 @@ +diff --git a/Makefile b/Makefile +index fee4101b5d22..72230ad23299 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 48 ++SUBLEVEL = 49 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi +index de981d629bdd..fdd267819319 100644 +--- a/arch/arm/boot/dts/r8a7743.dtsi ++++ b/arch/arm/boot/dts/r8a7743.dtsi +@@ -338,7 +338,7 @@ + #thermal-sensor-cells = <0>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -348,7 +348,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -357,7 +357,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -367,7 +367,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -376,7 +376,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -386,7 +386,7 @@ + status = "disabled"; + }; + +- ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7743", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi +index fa74a262107b..8264481bf876 100644 +--- a/arch/arm/boot/dts/r8a7744.dtsi ++++ b/arch/arm/boot/dts/r8a7744.dtsi +@@ -338,7 +338,7 @@ + #thermal-sensor-cells = <0>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -348,7 +348,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -357,7 +357,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -367,7 +367,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -376,7 +376,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -386,7 +386,7 @@ + status = "disabled"; + }; + +- 
ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7744", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi +index c53f7ff20695..c306713f2ab7 100644 +--- a/arch/arm/boot/dts/r8a7745.dtsi ++++ b/arch/arm/boot/dts/r8a7745.dtsi +@@ -302,7 +302,7 @@ + resets = <&cpg 407>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -312,7 +312,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -321,7 +321,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -331,7 +331,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -340,7 +340,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -350,7 +350,7 @@ + status = "disabled"; + }; + +- ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7745", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi +index 5a2747758f67..e3ba00a22eeb 100644 +--- a/arch/arm/boot/dts/r8a7790.dtsi ++++ b/arch/arm/boot/dts/r8a7790.dtsi +@@ -427,7 +427,7 @@ + #thermal-sensor-cells = <0>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -437,7 +437,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -446,7 +446,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -456,7 +456,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -465,7 +465,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -475,7 +475,7 @@ + status = "disabled"; + }; + +- ipmmu_rt: mmu@ffc80000 { ++ ipmmu_rt: iommu@ffc80000 { + compatible = "renesas,ipmmu-r8a7790", + "renesas,ipmmu-vmsa"; + reg = <0 0xffc80000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi +index 6f875502453c..a26f86ccc579 100644 +--- a/arch/arm/boot/dts/r8a7791.dtsi ++++ b/arch/arm/boot/dts/r8a7791.dtsi +@@ -350,7 +350,7 @@ + #thermal-sensor-cells = <0>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -360,7 +360,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + 
compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -369,7 +369,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -379,7 +379,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -388,7 +388,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -398,7 +398,7 @@ + status = "disabled"; + }; + +- ipmmu_rt: mmu@ffc80000 { ++ ipmmu_rt: iommu@ffc80000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xffc80000 0 0x1000>; +@@ -407,7 +407,7 @@ + status = "disabled"; + }; + +- ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7791", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi +index bf05110fac4e..fa3839795018 100644 +--- a/arch/arm/boot/dts/r8a7793.dtsi ++++ b/arch/arm/boot/dts/r8a7793.dtsi +@@ -336,7 +336,7 @@ + #thermal-sensor-cells = <0>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -346,7 +346,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -355,7 +355,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -365,7 +365,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -374,7 +374,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -384,7 +384,7 @@ + status = "disabled"; + }; + +- ipmmu_rt: mmu@ffc80000 { ++ ipmmu_rt: iommu@ffc80000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xffc80000 0 0x1000>; +@@ -393,7 +393,7 @@ + status = "disabled"; + }; + +- ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7793", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi +index 8d797d34816e..9dd952479e68 100644 +--- a/arch/arm/boot/dts/r8a7794.dtsi ++++ b/arch/arm/boot/dts/r8a7794.dtsi +@@ -290,7 +290,7 @@ + resets = <&cpg 407>; + }; + +- ipmmu_sy0: mmu@e6280000 { ++ ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7794", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; +@@ -300,7 +300,7 @@ + status = "disabled"; + }; + +- ipmmu_sy1: mmu@e6290000 { ++ ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7794", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; +@@ -309,7 +309,7 @@ + status = "disabled"; + }; + +- ipmmu_ds: mmu@e6740000 { ++ ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7794", + 
"renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; +@@ -319,7 +319,7 @@ + status = "disabled"; + }; + +- ipmmu_mp: mmu@ec680000 { ++ ipmmu_mp: iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7794", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; +@@ -328,7 +328,7 @@ + status = "disabled"; + }; + +- ipmmu_mx: mmu@fe951000 { ++ ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7794", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; +@@ -338,7 +338,7 @@ + status = "disabled"; + }; + +- ipmmu_gp: mmu@e62a0000 { ++ ipmmu_gp: iommu@e62a0000 { + compatible = "renesas,ipmmu-r8a7794", + "renesas,ipmmu-vmsa"; + reg = <0 0xe62a0000 0 0x1000>; +diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts +index 2e4742c53d04..7b8c3f25861c 100644 +--- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts ++++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts +@@ -91,6 +91,9 @@ + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; ++ reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>; ++ reset-delay-us = <1000>; ++ + phy0: ethernet-phy@7 { + reg = <7>; + }; +diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts +index d277d043031b..4c6704e4c57e 100644 +--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts ++++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts +@@ -31,7 +31,7 @@ + + pwr_led { + label = "bananapi-m2-zero:red:pwr"; +- gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */ ++ gpios = <&r_pio 0 10 GPIO_ACTIVE_LOW>; /* PL10 */ + default-state = "on"; + }; + }; +diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +index dfae90adbb7c..ce64bfb22f22 100644 +--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi ++++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +@@ -31,7 +31,7 @@ + #interrupt-cells = <1>; + ranges; + +- nor_flash: flash@0,00000000 { ++ nor_flash: flash@0 { + compatible = "arm,vexpress-flash", "cfi-flash"; + reg = <0 0x00000000 0x04000000>, + <4 0x00000000 0x04000000>; +@@ -41,13 +41,13 @@ + }; + }; + +- psram@1,00000000 { ++ psram@100000000 { + compatible = "arm,vexpress-psram", "mtd-ram"; + reg = <1 0x00000000 0x02000000>; + bank-width = <4>; + }; + +- ethernet@2,02000000 { ++ ethernet@202000000 { + compatible = "smsc,lan9118", "smsc,lan9115"; + reg = <2 0x02000000 0x10000>; + interrupts = <15>; +@@ -59,14 +59,14 @@ + vddvario-supply = <&v2m_fixed_3v3>; + }; + +- usb@2,03000000 { ++ usb@203000000 { + compatible = "nxp,usb-isp1761"; + reg = <2 0x03000000 0x20000>; + interrupts = <16>; + port1-otg; + }; + +- iofpga@3,00000000 { ++ iofpga@300000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; +diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig +index 982eabc36163..2406cab73835 100644 +--- a/arch/arm/mach-integrator/Kconfig ++++ b/arch/arm/mach-integrator/Kconfig +@@ -4,6 +4,8 @@ menuconfig ARCH_INTEGRATOR + depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 + select ARM_AMBA + select COMMON_CLK_VERSATILE ++ select CMA ++ select DMA_CMA + select HAVE_TCM + select ICST + select MFD_SYSCON +@@ -35,14 +37,13 @@ config INTEGRATOR_IMPD1 + select ARM_VIC + select GPIO_PL061 + select GPIOLIB ++ select REGULATOR ++ select REGULATOR_FIXED_VOLTAGE + help + The IM-PD1 is an add-on logic module for the Integrator which + allows ARM(R) Ltd PrimeCells to be developed and evaluated. + The IM-PD1 can be found on the Integrator/PP2 platform. 
+ +- To compile this driver as a module, choose M here: the +- module will be called impd1. +- + config INTEGRATOR_CM7TDMI + bool "Integrator/CM7TDMI core module" + depends on ARCH_INTEGRATOR_AP +diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +index bb4a2acb9970..502c4ac45c29 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +@@ -1728,18 +1728,18 @@ + }; + + sram: sram@fffc0000 { +- compatible = "amlogic,meson-axg-sram", "mmio-sram"; ++ compatible = "mmio-sram"; + reg = <0x0 0xfffc0000 0x0 0x20000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0xfffc0000 0x20000>; + +- cpu_scp_lpri: scp-shmem@13000 { ++ cpu_scp_lpri: scp-sram@13000 { + compatible = "amlogic,meson-axg-scp-shmem"; + reg = <0x13000 0x400>; + }; + +- cpu_scp_hpri: scp-shmem@13400 { ++ cpu_scp_hpri: scp-sram@13400 { + compatible = "amlogic,meson-axg-scp-shmem"; + reg = <0x13400 0x400>; + }; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +index 6733050d735f..ce230d6ac35c 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +@@ -345,20 +345,20 @@ + }; + + sram: sram@c8000000 { +- compatible = "amlogic,meson-gx-sram", "amlogic,meson-gxbb-sram", "mmio-sram"; ++ compatible = "mmio-sram"; + reg = <0x0 0xc8000000 0x0 0x14000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0xc8000000 0x14000>; + +- cpu_scp_lpri: scp-shmem@0 { +- compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; ++ cpu_scp_lpri: scp-sram@0 { ++ compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13000 0x400>; + }; + +- cpu_scp_hpri: scp-shmem@200 { +- compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; ++ cpu_scp_hpri: scp-sram@200 { ++ compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13400 0x400>; + }; + }; +diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi +index 15fe81738e94..dfb23dfc0b0f 100644 +--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi ++++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi +@@ -8,7 +8,7 @@ + gic: interrupt-controller@2c001000 { + compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; + #interrupt-cells = <3>; +- #address-cells = <2>; ++ #address-cells = <1>; + interrupt-controller; + reg = <0x0 0x2c001000 0 0x1000>, + <0x0 0x2c002000 0 0x2000>, +diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi +index f2c75c756039..906f51935b36 100644 +--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi ++++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi +@@ -8,9 +8,9 @@ + gic: interrupt-controller@2f000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; +- #address-cells = <2>; +- #size-cells = <2>; +- ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x2f000000 0x100000>; + interrupt-controller; + reg = <0x0 0x2f000000 0x0 0x10000>, + <0x0 0x2f100000 0x0 0x200000>, +@@ -22,7 +22,7 @@ + its: its@2f020000 { + compatible = "arm,gic-v3-its"; + msi-controller; +- reg = <0x0 0x2f020000 0x0 0x20000>; ++ reg = <0x20000 0x20000>; + }; + }; + }; +diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi +index 3f78373f708a..05d1657170b4 100644 +--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi ++++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi +@@ 
-107,51 +107,51 @@ + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; +- interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; +- +- ethernet@2,02000000 { ++ interrupt-map = <0 0 0 &gic 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 1 &gic 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 2 &gic 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 3 &gic 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 4 &gic 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 5 &gic 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 6 &gic 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 7 &gic 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 8 &gic 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 9 &gic 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 10 &gic 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 11 &gic 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 12 &gic 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 13 &gic 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 14 &gic 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 15 &gic 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 16 &gic 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 17 &gic 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 18 &gic 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 19 &gic 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 20 &gic 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 21 &gic 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 22 &gic 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, ++ 
<0 0 23 &gic 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 24 &gic 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 25 &gic 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 26 &gic 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 27 &gic 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 28 &gic 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 29 &gic 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 30 &gic 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 31 &gic 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 32 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 33 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 34 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 35 &gic 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 36 &gic 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 37 &gic 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 38 &gic 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 39 &gic 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 40 &gic 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 41 &gic 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 42 &gic 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; ++ ++ ethernet@202000000 { + compatible = "smsc,lan91c111"; + reg = <2 0x02000000 0x10000>; + interrupts = <15>; +@@ -178,7 +178,7 @@ + clock-output-names = "v2m:refclk32khz"; + }; + +- iofpga@3,00000000 { ++ iofpga@300000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; +diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi +index 8c11660bbe40..c47f76b01c4b 100644 +--- a/arch/arm64/boot/dts/arm/juno-base.dtsi ++++ b/arch/arm64/boot/dts/arm/juno-base.dtsi +@@ -62,35 +62,35 @@ + <0x0 0x2c02f000 0 0x2000>, + <0x0 0x2c04f000 0 0x2000>, + <0x0 0x2c06f000 0 0x2000>; +- #address-cells = <2>; ++ #address-cells = <1>; + #interrupt-cells = <3>; +- #size-cells = <2>; ++ #size-cells = <1>; + interrupt-controller; + interrupts = ; +- ranges = <0 0 0 0x2c1c0000 0 0x40000>; ++ ranges = <0 0 0x2c1c0000 0x40000>; + + v2m_0: v2m@0 { + compatible = "arm,gic-v2m-frame"; + msi-controller; +- reg = <0 0 0 0x10000>; ++ reg = <0 0x10000>; + }; + + v2m@10000 { + compatible = "arm,gic-v2m-frame"; + msi-controller; +- reg = <0 0x10000 0 0x10000>; ++ reg = <0x10000 0x10000>; + }; + + v2m@20000 { + compatible = "arm,gic-v2m-frame"; + msi-controller; +- reg = <0 0x20000 0 0x10000>; ++ reg = <0x20000 0x10000>; + }; + + v2m@30000 { + compatible = "arm,gic-v2m-frame"; + msi-controller; +- reg = <0 0x30000 0 0x10000>; ++ reg = <0x30000 0x10000>; + }; + }; + +@@ -519,10 +519,10 @@ + <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; +- interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 0 2 &gic 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 0 3 &gic 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 0 4 &gic 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + msi-parent = <&v2m_0>; + status = "disabled"; + iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */ +@@ -786,19 +786,19 @@ + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 15>; +- interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 4 &gic 0 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 5 &gic 0 0 GIC_SPI 162 
IRQ_TYPE_LEVEL_HIGH>, +- <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 8 &gic 0 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, +- <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 1 &gic 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 2 &gic 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 3 &gic 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 4 &gic 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 5 &gic 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 6 &gic 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 7 &gic 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 8 &gic 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 9 &gic 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 10 &gic 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 11 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, ++ <0 0 12 &gic 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; + }; + + site2: tlx@60000000 { +@@ -808,6 +808,6 @@ + ranges = <0 0 0x60000000 0x10000000>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0>; +- interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; + }; + }; +diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +index 9f60dacb4f80..1234a8cfc0a9 100644 +--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi ++++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +@@ -103,7 +103,7 @@ + }; + }; + +- flash@0,00000000 { ++ flash@0 { + /* 2 * 32MiB NOR Flash memory mounted on CS0 */ + compatible = "arm,vexpress-flash", "cfi-flash"; + reg = <0 0x00000000 0x04000000>; +@@ -120,7 +120,7 @@ + }; + }; + +- ethernet@2,00000000 { ++ ethernet@200000000 { + compatible = "smsc,lan9118", "smsc,lan9115"; + reg = <2 0x00000000 0x10000>; + interrupts = <3>; +@@ -133,7 +133,7 @@ + vddvario-supply = <&mb_fixed_3v3>; + }; + +- iofpga@3,00000000 { ++ iofpga@300000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; +diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi +index 57b0b9d7f3fa..29e6962c70bd 100644 +--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi ++++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi +@@ -9,7 +9,7 @@ + motherboard { + arm,v2m-memory-map = "rs2"; + +- iofpga@3,00000000 { ++ iofpga@300000000 { + virtio-p9@140000 { + compatible = "virtio,mmio"; + reg = <0x140000 0x200>; +diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +index 03a7bf079c8f..ad20076357f5 100644 +--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi ++++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +@@ -17,14 +17,14 @@ + #interrupt-cells = <1>; + ranges; + +- flash@0,00000000 { ++ flash@0 { + compatible = "arm,vexpress-flash", "cfi-flash"; + reg = <0 0x00000000 0x04000000>, + <4 0x00000000 0x04000000>; + bank-width = <4>; + }; + +- ethernet@2,02000000 { ++ ethernet@202000000 { + compatible = "smsc,lan91c111"; + reg = <2 0x02000000 0x10000>; + interrupts = <15>; +@@ -51,7 +51,7 @@ + clock-output-names = "v2m:refclk32khz"; + }; + +- iofpga@3,00000000 { ++ iofpga@300000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts 
b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +index 5f350cc71a2f..c3668187b844 100644 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +@@ -95,7 +95,7 @@ + }; + + sfp: sfp { +- compatible = "sff,sfp+"; ++ compatible = "sff,sfp"; + i2c-bus = <&i2c0>; + los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>; +@@ -171,6 +171,8 @@ + marvell,pad-type = "sd"; + vqmmc-supply = <&vsdio_reg>; + mmc-pwrseq = <&sdhci1_pwrseq>; ++ /* forbid SDR104 for FCC purposes */ ++ sdhci-caps-mask = <0x2 0x0>; + status = "okay"; + }; + +diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +index 5891b7151432..dec5e4113ce4 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +@@ -238,21 +238,21 @@ + cpu_on = <0x84000003>; + }; + +- clk26m: oscillator@0 { ++ clk26m: oscillator0 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <26000000>; + clock-output-names = "clk26m"; + }; + +- clk32k: oscillator@1 { ++ clk32k: oscillator1 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32000>; + clock-output-names = "clk32k"; + }; + +- cpum_ck: oscillator@2 { ++ cpum_ck: oscillator2 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; +@@ -268,19 +268,19 @@ + sustainable-power = <1500>; /* milliwatts */ + + trips { +- threshold: trip-point@0 { ++ threshold: trip-point0 { + temperature = <68000>; + hysteresis = <2000>; + type = "passive"; + }; + +- target: trip-point@1 { ++ target: trip-point1 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + +- cpu_crit: cpu_crit@0 { ++ cpu_crit: cpu_crit0 { + temperature = <115000>; + hysteresis = <2000>; + type = "critical"; +@@ -288,13 +288,13 @@ + }; + + cooling-maps { +- map@0 { ++ map0 { + trip = <&target>; + cooling-device = <&cpu0 0 0>, + <&cpu1 0 0>; + contribution = <3072>; + }; +- map@1 { ++ map1 { + trip = <&target>; + cooling-device = <&cpu2 0 0>, + <&cpu3 0 0>; +@@ -308,7 +308,7 @@ + #address-cells = <2>; + #size-cells = <2>; + ranges; +- vpu_dma_reserved: vpu_dma_mem_region { ++ vpu_dma_reserved: vpu_dma_mem_region@b7000000 { + compatible = "shared-dma-pool"; + reg = <0 0xb7000000 0 0x500000>; + alignment = <0x1000>; +@@ -360,7 +360,7 @@ + reg = <0 0x10005000 0 0x1000>; + }; + +- pio: pinctrl@10005000 { ++ pio: pinctrl@1000b000 { + compatible = "mediatek,mt8173-pinctrl"; + reg = <0 0x1000b000 0 0x1000>; + mediatek,pctl-regmap = <&syscfg_pctl_a>; +@@ -567,7 +567,7 @@ + status = "disabled"; + }; + +- gic: interrupt-controller@10220000 { ++ gic: interrupt-controller@10221000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; +diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +index 02909a48dfcd..7899759a12f8 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +@@ -32,7 +32,7 @@ + + phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 5) GPIO_ACTIVE_LOW>; + phy-handle = <&phy>; +- phy-mode = "rgmii"; ++ phy-mode = "rgmii-id"; + + mdio { + #address-cells = <1>; +diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +index 457b815d57f4..2f3926719434 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +@@ -1192,7 +1192,7 @@ + 
+ bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x30100000 0x0 0x30100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */ ++ 0xc3000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0x82000000 0x0 0x40000000 0x12 0x30000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ + }; + +@@ -1238,7 +1238,7 @@ + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x32100000 0x0 0x32100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */ ++ 0xc3000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0x82000000 0x0 0x40000000 0x12 0x70000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ + }; + +@@ -1284,7 +1284,7 @@ + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x34100000 0x0 0x34100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */ ++ 0xc3000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0x82000000 0x0 0x40000000 0x12 0xb0000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ + }; + +@@ -1330,7 +1330,7 @@ + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x36100000 0x0 0x36100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ ++ 0xc3000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0x82000000 0x0 0x40000000 0x17 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ + }; + +@@ -1376,7 +1376,7 @@ + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ ++ 0xc3000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0x82000000 0x0 0x40000000 0x1b 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ + }; + +@@ -1426,7 +1426,7 @@ + + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x3a100000 0x0 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */ +- 0xc2000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ ++ 0xc3000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ + }; + +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi +index 5ea9fb8f2f87..340da154d4e3 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi +@@ -212,7 +212,7 @@ + thermal-sensors = <&tsens 3>; + + trips { +- cpu2_3_alert0: trip-point@0 { ++ cpu2_3_alert0: trip-point0 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; +@@ -242,7 +242,7 @@ + thermal-sensors = <&tsens 2>; + + trips { +- gpu_alert0: trip-point@0 { ++ gpu_alert0: trip-point0 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; +@@ -262,7 +262,7 @@ + thermal-sensors = <&tsens 1>; + + trips { +- cam_alert0: trip-point@0 { ++ cam_alert0: trip-point0 { + temperature = <75000>; + hysteresis = <2000>; + type = "hot"; +@@ -277,7 +277,7 @@ + thermal-sensors = <&tsens 0>; + + trips { +- modem_alert0: trip-point@0 { ++ modem_alert0: trip-point0 { + 
temperature = <85000>; + hysteresis = <2000>; + type = "hot"; +diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi +index fbb8ce78f95b..d303df3887d9 100644 +--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi +@@ -1681,16 +1681,16 @@ + "csi_clk_mux", + "vfe0", + "vfe1"; +- interrupts = , +- , +- , +- , +- , +- , +- , +- , +- , +- ; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; + interrupt-names = "csiphy0", + "csiphy1", + "csiphy2", +diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi +index b6e304748a57..c0b197458665 100644 +--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi ++++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi +@@ -73,18 +73,8 @@ + reg = <0xc000>; + gpio-controller; + #gpio-cells = <2>; +- interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc1 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc2 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc3 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc4 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc5 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc6 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc7 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc8 0x0 IRQ_TYPE_NONE>, +- <0x0 0xc9 0x0 IRQ_TYPE_NONE>, +- <0x0 0xca 0x0 IRQ_TYPE_NONE>, +- <0x0 0xcb 0x0 IRQ_TYPE_NONE>; ++ interrupt-controller; ++ #interrupt-cells = <2>; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi +index 322379d5c31f..40b5d75a4a1d 100644 +--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi ++++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi +@@ -62,18 +62,8 @@ + reg = <0xc000>; + gpio-controller; + #gpio-cells = <2>; +- interrupts = <0x2 0xc0 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc1 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc2 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc3 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc4 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc5 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc6 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc7 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc8 0x0 IRQ_TYPE_NONE>, +- <0x2 0xc9 0x0 IRQ_TYPE_NONE>, +- <0x2 0xca 0x0 IRQ_TYPE_NONE>, +- <0x2 0xcb 0x0 IRQ_TYPE_NONE>; ++ interrupt-controller; ++ #interrupt-cells = <2>; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi +index eb0e9a090e42..cf05e0685d10 100644 +--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi ++++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi +@@ -56,18 +56,8 @@ + reg = <0xc000>; + gpio-controller; + #gpio-cells = <2>; +- interrupts = <0x4 0xc0 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc1 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc2 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc3 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc4 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc5 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc6 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc7 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc8 0x0 IRQ_TYPE_NONE>, +- <0x4 0xc9 0x0 IRQ_TYPE_NONE>, +- <0x4 0xca 0x0 IRQ_TYPE_NONE>, +- <0x4 0xcb 0x0 IRQ_TYPE_NONE>; ++ interrupt-controller; ++ #interrupt-cells = <2>; + }; + }; + +diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c +index 38ee1514cd9c..b4a160795824 100644 +--- a/arch/arm64/kernel/hw_breakpoint.c ++++ b/arch/arm64/kernel/hw_breakpoint.c +@@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, + return 0; + } + ++static int watchpoint_report(struct perf_event *wp, unsigned long addr, ++ struct pt_regs *regs) ++{ ++ int step = is_default_overflow_handler(wp); ++ struct arch_hw_breakpoint *info = counter_arch_bp(wp); ++ ++ info->trigger = addr; ++ ++ /* ++ * If we triggered a user watchpoint from a uaccess routine, then ++ * handle the stepping ourselves since userspace really can't help ++ * us 
with this. ++ */ ++ if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) ++ step = 1; ++ else ++ perf_bp_event(wp, regs); ++ ++ return step; ++} ++ + static int watchpoint_handler(unsigned long addr, unsigned int esr, + struct pt_regs *regs) + { +@@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, + u64 val; + struct perf_event *wp, **slots; + struct debug_info *debug_info; +- struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = this_cpu_ptr(wp_on_reg); +@@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, + if (dist != 0) + continue; + +- info = counter_arch_bp(wp); +- info->trigger = addr; +- perf_bp_event(wp, regs); +- +- /* Do we need to handle the stepping? */ +- if (is_default_overflow_handler(wp)) +- step = 1; ++ step = watchpoint_report(wp, addr, regs); + } +- if (min_dist > 0 && min_dist != -1) { +- /* No exact match found. */ +- wp = slots[closest_match]; +- info = counter_arch_bp(wp); +- info->trigger = addr; +- perf_bp_event(wp, regs); + +- /* Do we need to handle the stepping? */ +- if (is_default_overflow_handler(wp)) +- step = 1; +- } ++ /* No exact match found? */ ++ if (min_dist > 0 && min_dist != -1) ++ step = watchpoint_report(slots[closest_match], addr, regs); ++ + rcu_read_unlock(); + + if (!step) +diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c +index 62b0eb6cf69a..84eab0f5e00a 100644 +--- a/arch/m68k/coldfire/pci.c ++++ b/arch/m68k/coldfire/pci.c +@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void) + + /* Keep a virtual mapping to IO/config space active */ + iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); +- if (iospace == 0) ++ if (iospace == 0) { ++ pci_free_host_bridge(bridge); + return -ENODEV; ++ } + pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", + (u32) iospace); + +diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S +index e4a78571f883..c6481cfc5220 100644 +--- a/arch/openrisc/kernel/entry.S ++++ b/arch/openrisc/kernel/entry.S +@@ -1166,13 +1166,13 @@ ENTRY(__sys_clone) + l.movhi r29,hi(sys_clone) + l.ori r29,r29,lo(sys_clone) + l.j _fork_save_extra_regs_and_call +- l.addi r7,r1,0 ++ l.nop + + ENTRY(__sys_fork) + l.movhi r29,hi(sys_fork) + l.ori r29,r29,lo(sys_fork) + l.j _fork_save_extra_regs_and_call +- l.addi r3,r1,0 ++ l.nop + + ENTRY(sys_rt_sigreturn) + l.jal _sys_rt_sigreturn +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index 44431dc06982..ad620637cbd1 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -747,6 +747,7 @@ config THREAD_SHIFT + range 13 15 + default "15" if PPC_256K_PAGES + default "14" if PPC64 ++ default "14" if KASAN + default "13" + help + Used to define the stack size. 
The default is almost always what you +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h +index a143d394ff46..e1eb8aa9cfbb 100644 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h +@@ -998,10 +998,25 @@ extern struct page *pgd_page(pgd_t pgd); + #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) + #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) + +-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) +-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) +-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) +-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) ++static inline unsigned long pgd_index(unsigned long address) ++{ ++ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); ++} ++ ++static inline unsigned long pud_index(unsigned long address) ++{ ++ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); ++} ++ ++static inline unsigned long pmd_index(unsigned long address) ++{ ++ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++} ++ ++static inline unsigned long pte_index(unsigned long address) ++{ ++ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); ++} + + /* + * Find an entry in a page-table-directory. We combine the address region +diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h +index a9993e7a443b..64b998db9d3e 100644 +--- a/arch/powerpc/include/asm/processor.h ++++ b/arch/powerpc/include/asm/processor.h +@@ -291,7 +291,6 @@ struct thread_struct { + #else + #define INIT_THREAD { \ + .ksp = INIT_SP, \ +- .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ + .addr_limit = KERNEL_DS, \ + .fpexc_mode = 0, \ + .fscr = FSCR_TAR | FSCR_EBB \ +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index d0018dd17e0a..70ac8a6ba0c1 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -1090,17 +1090,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common) + bl machine_check_queue_event + + /* +- * We have not used any non-volatile GPRs here, and as a rule +- * most exception code including machine check does not. +- * Therefore PACA_NAPSTATELOST does not need to be set. Idle +- * wakeup will restore volatile registers. ++ * GPR-loss wakeups are relatively straightforward, because the ++ * idle sleep code has saved all non-volatile registers on its ++ * own stack, and r1 in PACAR1. + * +- * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce. ++ * For no-loss wakeups the r1 and lr registers used by the ++ * early machine check handler have to be restored first. r2 is ++ * the kernel TOC, so no need to restore it. + * + * Then decrement MCE nesting after finishing with the stack. 
+ */ + ld r3,_MSR(r1) + ld r4,_LINK(r1) ++ ld r1,GPR1(r1) + + lhz r11,PACA_IN_MCE(r13) + subi r11,r11,1 +@@ -1109,7 +1111,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common) + mtlr r4 + rlwinm r10,r3,47-31,30,31 + cmpwi cr1,r10,2 +- bltlr cr1 /* no state loss, return to idle caller */ ++ bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ + b idle_return_gpr_loss + #endif + +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S +index ad79fddb974d..780f527eabd2 100644 +--- a/arch/powerpc/kernel/head_64.S ++++ b/arch/powerpc/kernel/head_64.S +@@ -945,15 +945,8 @@ start_here_multiplatform: + std r0,0(r4) + #endif + +- /* The following gets the stack set up with the regs */ +- /* pointing to the real addr of the kernel stack. This is */ +- /* all done to support the C function call below which sets */ +- /* up the htab. This is done because we have relocated the */ +- /* kernel but are still running in real mode. */ +- +- LOAD_REG_ADDR(r3,init_thread_union) +- + /* set up a stack pointer */ ++ LOAD_REG_ADDR(r3,init_thread_union) + LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) + add r1,r3,r1 + li r0,0 +diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c +index c4ed328a7b96..7a1c11a7cba5 100644 +--- a/arch/powerpc/kernel/machine_kexec.c ++++ b/arch/powerpc/kernel/machine_kexec.c +@@ -114,11 +114,12 @@ void machine_kexec(struct kimage *image) + + void __init reserve_crashkernel(void) + { +- unsigned long long crash_size, crash_base; ++ unsigned long long crash_size, crash_base, total_mem_sz; + int ret; + ++ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size(); + /* use common parsing */ +- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), ++ ret = parse_crashkernel(boot_command_line, total_mem_sz, + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + crashk_res.start = crash_base; +@@ -177,6 +178,7 @@ void __init reserve_crashkernel(void) + /* Crash kernel trumps memory limit */ + if (memory_limit && memory_limit <= crashk_res.end) { + memory_limit = crashk_res.end + 1; ++ total_mem_sz = memory_limit; + printk("Adjusted memory limit for crashkernel, now 0x%llx\n", + memory_limit); + } +@@ -185,7 +187,7 @@ void __init reserve_crashkernel(void) + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start >> 20), +- (unsigned long)(memblock_phys_mem_size() >> 20)); ++ (unsigned long)(total_mem_sz >> 20)); + + if (!memblock_is_region_memory(crashk_res.start, crash_size) || + memblock_reserve(crashk_res.start, crash_size)) { +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 639ceae7da9d..bd0c258a1d5d 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -1218,29 +1218,31 @@ struct task_struct *__switch_to(struct task_struct *prev, + static void show_instructions(struct pt_regs *regs) + { + int i; ++ unsigned long nip = regs->nip; + unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); + + printk("Instruction dump:"); + ++ /* ++ * If we were executing with the MMU off for instructions, adjust pc ++ * rather than printing XXXXXXXX. 
++ */ ++ if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) { ++ pc = (unsigned long)phys_to_virt(pc); ++ nip = (unsigned long)phys_to_virt(regs->nip); ++ } ++ + for (i = 0; i < NR_INSN_TO_PRINT; i++) { + int instr; + + if (!(i % 8)) + pr_cont("\n"); + +-#if !defined(CONFIG_BOOKE) +- /* If executing with the IMMU off, adjust pc rather +- * than print XXXXXXXX. +- */ +- if (!(regs->msr & MSR_IR)) +- pc = (unsigned long)phys_to_virt(pc); +-#endif +- + if (!__kernel_text_address(pc) || + probe_kernel_address((const void *)pc, instr)) { + pr_cont("XXXXXXXX "); + } else { +- if (regs->nip == pc) ++ if (nip == pc) + pr_cont("<%08x> ", instr); + else + pr_cont("%08x ", instr); +diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c +index 2d415c36a61d..43b56f8f6beb 100644 +--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c ++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c +@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache; + + static pte_t *kvmppc_pte_alloc(void) + { +- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); ++ pte_t *pte; ++ ++ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); ++ /* pmd_populate() will only reference _pa(pte). */ ++ kmemleak_ignore(pte); ++ ++ return pte; + } + + static void kvmppc_pte_free(pte_t *ptep) +@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep) + + static pmd_t *kvmppc_pmd_alloc(void) + { +- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); ++ pmd_t *pmd; ++ ++ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); ++ /* pud_populate() will only reference _pa(pmd). */ ++ kmemleak_ignore(pmd); ++ ++ return pmd; + } + + static void kvmppc_pmd_free(pmd_t *pmdp) +diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c +index 5834db0a54c6..03b947429e4d 100644 +--- a/arch/powerpc/kvm/book3s_64_vio.c ++++ b/arch/powerpc/kvm/book3s_64_vio.c +@@ -74,6 +74,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, + struct kvmppc_spapr_tce_iommu_table *stit, *tmp; + struct iommu_table_group *table_group = NULL; + ++ rcu_read_lock(); + list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { + + table_group = iommu_group_get_iommudata(grp); +@@ -88,7 +89,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, + kref_put(&stit->kref, kvm_spapr_tce_liobn_put); + } + } ++ cond_resched_rcu(); + } ++ rcu_read_unlock(); + } + + extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, +@@ -106,12 +109,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + if (!f.file) + return -EBADF; + ++ rcu_read_lock(); + list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { + if (stt == f.file->private_data) { + found = true; + break; + } + } ++ rcu_read_unlock(); + + fdput(f); + +@@ -144,6 +149,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + if (!tbl) + return -EINVAL; + ++ rcu_read_lock(); + list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { + if (tbl != stit->tbl) + continue; +@@ -151,14 +157,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + if (!kref_get_unless_zero(&stit->kref)) { + /* stit is being destroyed */ + iommu_tce_table_put(tbl); ++ rcu_read_unlock(); + return -ENOTTY; + } + /* + * The table is already known to this KVM, we just increased + * its KVM reference counter and can return. 
+ */ ++ rcu_read_unlock(); + return 0; + } ++ rcu_read_unlock(); + + stit = kzalloc(sizeof(*stit), GFP_KERNEL); + if (!stit) { +@@ -364,18 +373,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) + return H_TOO_HARD; + ++ rcu_read_lock(); + list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { + unsigned long hpa = 0; + struct mm_iommu_table_group_mem_t *mem; + long shift = stit->tbl->it_page_shift; + + mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); +- if (!mem) +- return H_TOO_HARD; +- +- if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) ++ if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { ++ rcu_read_unlock(); + return H_TOO_HARD; ++ } + } ++ rcu_read_unlock(); + + return H_SUCCESS; + } +diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c +index 84d5fab94f8f..1424a120710e 100644 +--- a/arch/powerpc/mm/book3s32/mmu.c ++++ b/arch/powerpc/mm/book3s32/mmu.c +@@ -187,6 +187,7 @@ void mmu_mark_initmem_nx(void) + int i; + unsigned long base = (unsigned long)_stext - PAGE_OFFSET; + unsigned long top = (unsigned long)_etext - PAGE_OFFSET; ++ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; + unsigned long size; + + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) +@@ -201,9 +202,10 @@ void mmu_mark_initmem_nx(void) + size = block_size(base, top); + size = max(size, 128UL << 10); + if ((top - base) > size) { +- if (strict_kernel_rwx_enabled()) +- pr_warn("Kernel _etext not properly aligned\n"); + size <<= 1; ++ if (strict_kernel_rwx_enabled() && base + size > border) ++ pr_warn("Some RW data is getting mapped X. " ++ "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); + } + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); + base += size; +diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c +index f7ed2f187cb0..784f8df17f73 100644 +--- a/arch/powerpc/mm/ptdump/shared.c ++++ b/arch/powerpc/mm/ptdump/shared.c +@@ -30,6 +30,11 @@ static const struct flag_info flag_array[] = { + .val = _PAGE_PRESENT, + .set = "present", + .clear = " ", ++ }, { ++ .mask = _PAGE_COHERENT, ++ .val = _PAGE_COHERENT, ++ .set = "coherent", ++ .clear = " ", + }, { + .mask = _PAGE_GUARDED, + .val = _PAGE_GUARDED, +diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c +index 573e0b309c0c..48e8f4b17b91 100644 +--- a/arch/powerpc/perf/hv-24x7.c ++++ b/arch/powerpc/perf/hv-24x7.c +@@ -1400,16 +1400,6 @@ static void h_24x7_event_read(struct perf_event *event) + h24x7hw = &get_cpu_var(hv_24x7_hw); + h24x7hw->events[i] = event; + put_cpu_var(h24x7hw); +- /* +- * Clear the event count so we can compute the _change_ +- * in the 24x7 raw counter value at the end of the txn. +- * +- * Note that we could alternatively read the 24x7 value +- * now and save its value in event->hw.prev_count. But +- * that would require issuing a hcall, which would then +- * defeat the purpose of using the txn interface. 
+- */ +- local64_set(&event->count, 0); + } + + put_cpu_var(hv_24x7_reqb); +diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c +index e6e2adcc7b64..c13d64c3b019 100644 +--- a/arch/powerpc/platforms/4xx/pci.c ++++ b/arch/powerpc/platforms/4xx/pci.c +@@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) + if (mbase == NULL) { + printk(KERN_ERR "%pOF: Can't map internal config space !", + port->node); +- goto done; ++ return; + } + + while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) +@@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) + } + if (attempt) + port->link = 1; +-done: + iounmap(mbase); +- + } + + static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { +diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c +index 423be34f0f5f..f42fe4e86ce5 100644 +--- a/arch/powerpc/platforms/ps3/mm.c ++++ b/arch/powerpc/platforms/ps3/mm.c +@@ -200,13 +200,14 @@ void ps3_mm_vas_destroy(void) + { + int result; + +- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); +- + if (map.vas_id) { + result = lv1_select_virtual_address_space(0); +- BUG_ON(result); +- result = lv1_destruct_virtual_address_space(map.vas_id); +- BUG_ON(result); ++ result += lv1_destruct_virtual_address_space(map.vas_id); ++ ++ if (result) { ++ lv1_panic(0); ++ } ++ + map.vas_id = 0; + } + } +@@ -304,19 +305,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) + int result; + + if (!r->destroy) { +- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", +- __func__, __LINE__, r->base, r->size); + return; + } + +- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); +- + if (r->base) { + result = lv1_release_memory(r->base); +- BUG_ON(result); ++ ++ if (result) { ++ lv1_panic(0); ++ } ++ + r->size = r->base = r->offset = 0; + map.total = map.rm.size; + } ++ + ps3_mm_set_repository_highmem(NULL); + } + +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c +index 753adeb624f2..13ef77fd648f 100644 +--- a/arch/powerpc/platforms/pseries/ras.c ++++ b/arch/powerpc/platforms/pseries/ras.c +@@ -395,10 +395,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) + /* + * Some versions of FWNMI place the buffer inside the 4kB page starting at + * 0x7000. Other versions place it inside the rtas buffer. We check both. ++ * Minimum size of the buffer is 16 bytes. + */ + #define VALID_FWNMI_BUFFER(A) \ +- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ +- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) ++ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ ++ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) + + static inline struct rtas_error_log *fwnmi_get_errlog(void) + { +diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h +index f073292e9fdb..d9d5de0f67ff 100644 +--- a/arch/s390/include/asm/syscall.h ++++ b/arch/s390/include/asm/syscall.h +@@ -33,7 +33,17 @@ static inline void syscall_rollback(struct task_struct *task, + static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) + { +- return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; ++ unsigned long error = regs->gprs[2]; ++#ifdef CONFIG_COMPAT ++ if (test_tsk_thread_flag(task, TIF_31BIT)) { ++ /* ++ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO ++ * and will match correctly in comparisons. 
++ */ ++ error = (long)(int)error; ++ } ++#endif ++ return IS_ERR_VALUE(error) ? error : 0; + } + + static inline long syscall_get_return_value(struct task_struct *task, +diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c +index cc3ad64479ac..9e256d4d1f4c 100644 +--- a/arch/sparc/mm/srmmu.c ++++ b/arch/sparc/mm/srmmu.c +@@ -379,7 +379,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) + return NULL; + page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); + if (!pgtable_pte_page_ctor(page)) { +- __free_page(page); + return NULL; + } + return page; +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index e2839b5c246c..6539c50fb9aa 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE + + SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) + +-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' ++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' + + quiet_cmd_zoffset = ZOFFSET $@ + cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 25b8c45467fc..fce94c799f01 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -2099,7 +2099,7 @@ void __init init_apic_mappings(void) + unsigned int new_apicid; + + if (apic_validate_deadline_timer()) +- pr_debug("TSC deadline timer available\n"); ++ pr_info("TSC deadline timer available\n"); + + if (x2apic_mode) { + boot_cpu_physical_apicid = read_apic_id(); +diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c +index 87ef69a72c52..7bb4c3cbf4dc 100644 +--- a/arch/x86/kernel/idt.c ++++ b/arch/x86/kernel/idt.c +@@ -318,7 +318,11 @@ void __init idt_setup_apic_and_irq_gates(void) + + #ifdef CONFIG_X86_LOCAL_APIC + for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { +- set_bit(i, system_vectors); ++ /* ++ * Don't set the non assigned system vectors in the ++ * system_vectors bitmap. Otherwise they show up in ++ * /proc/interrupts. ++ */ + entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR); + set_intr_gate(i, entry); + } +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 43fc13c831af..62c39baea39e 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -746,16 +746,11 @@ asm( + NOKPROBE_SYMBOL(kretprobe_trampoline); + STACK_FRAME_NON_STANDARD(kretprobe_trampoline); + +-static struct kprobe kretprobe_kprobe = { +- .addr = (void *)kretprobe_trampoline, +-}; +- + /* + * Called from kretprobe_trampoline + */ + __used __visible void *trampoline_handler(struct pt_regs *regs) + { +- struct kprobe_ctlblk *kcb; + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; +@@ -765,16 +760,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) + void *frame_pointer; + bool skipped = false; + +- preempt_disable(); +- + /* + * Set a dummy kprobe for avoiding kretprobe recursion. + * Since kretprobe never run in kprobe handler, kprobe must not + * be running at this point. 
+ */ +- kcb = get_kprobe_ctlblk(); +- __this_cpu_write(current_kprobe, &kretprobe_kprobe); +- kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++ kprobe_busy_begin(); + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); +@@ -850,7 +841,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) + __this_cpu_write(current_kprobe, &ri->rp->kp); + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); +- __this_cpu_write(current_kprobe, &kretprobe_kprobe); ++ __this_cpu_write(current_kprobe, &kprobe_busy); + } + + recycle_rp_inst(ri, &empty_rp); +@@ -866,8 +857,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) + + kretprobe_hash_unlock(current, &flags); + +- __this_cpu_write(current_kprobe, NULL); +- preempt_enable(); ++ kprobe_busy_end(); + + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile +index fb4ee5444379..9733d1cc791d 100644 +--- a/arch/x86/purgatory/Makefile ++++ b/arch/x86/purgatory/Makefile +@@ -17,7 +17,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS + LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib + targets += purgatory.ro + ++# Sanitizer, etc. runtimes are unavailable and cannot be linked here. ++GCOV_PROFILE := n + KASAN_SANITIZE := n ++UBSAN_SANITIZE := n + KCOV_INSTRUMENT := n + + # These are adjustments to the compiler flags used for objects that +@@ -25,7 +28,7 @@ KCOV_INSTRUMENT := n + + PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel + PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss +-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) ++PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING + + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That + # in turn leaves some undefined symbols like __fentry__ in purgatory and not +diff --git a/crypto/algboss.c b/crypto/algboss.c +index a62149d6c839..2d41e67532c0 100644 +--- a/crypto/algboss.c ++++ b/crypto/algboss.c +@@ -188,8 +188,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) + if (IS_ERR(thread)) + goto err_put_larval; + +- wait_for_completion_interruptible(&larval->completion); +- + return NOTIFY_STOP; + + err_put_larval: +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index e2c8ab408bed..4c3bdffe0c3a 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + return PTR_ERR(areq); + + /* convert iovecs of output buffers into RX SGL */ +- err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); ++ err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); + if (err) + goto free; + +- /* Process only as much RX buffers for which we have TX data */ +- if (len > ctx->used) +- len = ctx->used; +- + /* + * If more buffers are to be expected to be processed, process only + * full block size buffers. 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 581595b35573..35f75c691d7c 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -41,7 +41,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -6592,7 +6591,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) + /* perform each probe asynchronously */ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; +- async_schedule(async_port_probe, ap); ++ ap->cookie = async_schedule(async_port_probe, ap); + } + + return 0; +@@ -6732,11 +6731,11 @@ void ata_host_detach(struct ata_host *host) + { + int i; + +- /* Ensure ata_port probe has completed */ +- async_synchronize_full(); +- +- for (i = 0; i < host->n_ports; i++) ++ for (i = 0; i < host->n_ports; i++) { ++ /* Ensure ata_port probe has completed */ ++ async_synchronize_cookie(host->ports[i]->cookie + 1); + ata_port_detach(host->ports[i]); ++ } + + /* the host is dead now, dissociate ACPI */ + ata_acpi_dissociate(host); +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index 604a461848c9..0b67d41bab8f 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -802,6 +802,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, + /* temporary section violation during probe() */ + drv->probe = probe; + retval = code = __platform_driver_register(drv, module); ++ if (retval) ++ return retval; + + /* + * Fixup that section violation, being paranoid about code scanning +diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c +index c5c6487a19d5..7b55811c2a81 100644 +--- a/drivers/block/ps3disk.c ++++ b/drivers/block/ps3disk.c +@@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) + queue->queuedata = dev; + + blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); +- blk_queue_segment_boundary(queue, -1UL); + blk_queue_dma_alignment(queue, dev->blk_size-1); + blk_queue_logical_block_size(queue, dev->blk_size); + +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 90f5292e2051..ac656a6d5daf 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + + #define IPMI_DRIVER_VERSION "39.2" + +@@ -1170,7 +1171,7 @@ static void free_user_work(struct work_struct *work) + remove_work); + + cleanup_srcu_struct(&user->release_barrier); +- kfree(user); ++ vfree(user); + } + + int ipmi_create_user(unsigned int if_num, +@@ -1202,7 +1203,7 @@ int ipmi_create_user(unsigned int if_num, + if (rv) + return rv; + +- new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); ++ new_user = vzalloc(sizeof(*new_user)); + if (!new_user) + return -ENOMEM; + +@@ -1249,7 +1250,7 @@ int ipmi_create_user(unsigned int if_num, + + out_kfree: + srcu_read_unlock(&ipmi_interfaces_srcu, index); +- kfree(new_user); ++ vfree(new_user); + return rv; + } + EXPORT_SYMBOL(ipmi_create_user); +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 43dd0891ca1e..31cae88a730b 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -31,11 +31,15 @@ + #include + #include + #include ++#include ++#include ++#include + + #ifdef CONFIG_IA64 + # include + #endif + ++#define DEVMEM_MINOR 1 + #define DEVPORT_MINOR 4 + + static inline unsigned long size_inside_page(unsigned long start, +@@ -805,12 +809,64 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) + return ret; + } 
+ ++static struct inode *devmem_inode; ++ ++#ifdef CONFIG_IO_STRICT_DEVMEM ++void revoke_devmem(struct resource *res) ++{ ++ struct inode *inode = READ_ONCE(devmem_inode); ++ ++ /* ++ * Check that the initialization has completed. Losing the race ++ * is ok because it means drivers are claiming resources before ++ * the fs_initcall level of init and prevent /dev/mem from ++ * establishing mappings. ++ */ ++ if (!inode) ++ return; ++ ++ /* ++ * The expectation is that the driver has successfully marked ++ * the resource busy by this point, so devmem_is_allowed() ++ * should start returning false, however for performance this ++ * does not iterate the entire resource range. ++ */ ++ if (devmem_is_allowed(PHYS_PFN(res->start)) && ++ devmem_is_allowed(PHYS_PFN(res->end))) { ++ /* ++ * *cringe* iomem=relaxed says "go ahead, what's the ++ * worst that can happen?" ++ */ ++ return; ++ } ++ ++ unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); ++} ++#endif ++ + static int open_port(struct inode *inode, struct file *filp) + { ++ int rc; ++ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + +- return security_locked_down(LOCKDOWN_DEV_MEM); ++ rc = security_locked_down(LOCKDOWN_DEV_MEM); ++ if (rc) ++ return rc; ++ ++ if (iminor(inode) != DEVMEM_MINOR) ++ return 0; ++ ++ /* ++ * Use a unified address space to have a single point to manage ++ * revocations when drivers want to take over a /dev/mem mapped ++ * range. ++ */ ++ inode->i_mapping = devmem_inode->i_mapping; ++ filp->f_mapping = inode->i_mapping; ++ ++ return 0; + } + + #define zero_lseek null_lseek +@@ -885,7 +941,7 @@ static const struct memdev { + fmode_t fmode; + } devlist[] = { + #ifdef CONFIG_DEVMEM +- [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, ++ [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, + #endif + #ifdef CONFIG_DEVKMEM + [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, +@@ -939,6 +995,45 @@ static char *mem_devnode(struct device *dev, umode_t *mode) + + static struct class *mem_class; + ++static int devmem_fs_init_fs_context(struct fs_context *fc) ++{ ++ return init_pseudo(fc, DEVMEM_MAGIC) ? 
0 : -ENOMEM; ++} ++ ++static struct file_system_type devmem_fs_type = { ++ .name = "devmem", ++ .owner = THIS_MODULE, ++ .init_fs_context = devmem_fs_init_fs_context, ++ .kill_sb = kill_anon_super, ++}; ++ ++static int devmem_init_inode(void) ++{ ++ static struct vfsmount *devmem_vfs_mount; ++ static int devmem_fs_cnt; ++ struct inode *inode; ++ int rc; ++ ++ rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt); ++ if (rc < 0) { ++ pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc); ++ return rc; ++ } ++ ++ inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb); ++ if (IS_ERR(inode)) { ++ rc = PTR_ERR(inode); ++ pr_err("Cannot allocate inode for /dev/mem: %d\n", rc); ++ simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt); ++ return rc; ++ } ++ ++ /* publish /dev/mem initialized */ ++ WRITE_ONCE(devmem_inode, inode); ++ ++ return 0; ++} ++ + static int __init chr_dev_init(void) + { + int minor; +@@ -960,6 +1055,8 @@ static int __init chr_dev_init(void) + */ + if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) + continue; ++ if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0) ++ continue; + + device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), + NULL, devlist[minor].name); +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index 802e488fd3c3..6e5d635f030f 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -1448,13 +1448,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, + return &clock->hw; + } + +-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, ++static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, + const struct bcm2835_gate_data *data) + { +- return clk_register_gate(cprman->dev, data->name, data->parent, +- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, +- cprman->regs + data->ctl_reg, +- CM_GATE_BIT, 0, &cprman->regs_lock); ++ return clk_hw_register_gate(cprman->dev, data->name, data->parent, ++ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, ++ cprman->regs + data->ctl_reg, ++ CM_GATE_BIT, 0, &cprman->regs_lock); + } + + typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, +diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c +index b1318e6b655b..675cab6fa781 100644 +--- a/drivers/clk/clk-ast2600.c ++++ b/drivers/clk/clk-ast2600.c +@@ -599,14 +599,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = { + 2, 2, 3, 5, + }; + +-static const u32 ast2600_a1_axi_ahb_div_table[] = { +- 4, 6, 2, 4, ++static const u32 ast2600_a1_axi_ahb_div0_tbl[] = { ++ 3, 2, 3, 4, ++}; ++ ++static const u32 ast2600_a1_axi_ahb_div1_tbl[] = { ++ 3, 4, 6, 8, ++}; ++ ++static const u32 ast2600_a1_axi_ahb200_tbl[] = { ++ 3, 4, 3, 4, 2, 2, 2, 2, + }; + + static void __init aspeed_g6_cc(struct regmap *map) + { + struct clk_hw *hw; +- u32 val, div, chip_id, axi_div, ahb_div; ++ u32 val, div, divbits, chip_id, axi_div, ahb_div; + + clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); + +@@ -636,11 +644,22 @@ static void __init aspeed_g6_cc(struct regmap *map) + else + axi_div = 2; + ++ divbits = (val >> 11) & 0x3; + regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); +- if (chip_id & BIT(16)) +- ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3]; +- else ++ if (chip_id & BIT(16)) { ++ if (!divbits) { ++ ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; ++ if (val & BIT(16)) ++ ahb_div *= 2; ++ } else { ++ if (val & BIT(16)) ++ ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits]; ++ else ++ ahb_div = 
ast2600_a1_axi_ahb_div0_tbl[divbits]; ++ } ++ } else { + ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3]; ++ } + + hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div); + aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw; +diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c +index 8856ce476ccf..082178a0f41a 100644 +--- a/drivers/clk/meson/meson8b.c ++++ b/drivers/clk/meson/meson8b.c +@@ -1071,7 +1071,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = { + * Meson8m2: vid2_pll + */ + .parent_hws = (const struct clk_hw *[]) { +- &meson8b_hdmi_pll_dco.hw ++ &meson8b_hdmi_pll_lvds_out.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, +@@ -1207,7 +1207,7 @@ static struct clk_regmap meson8b_vclk_in_en = { + + static struct clk_regmap meson8b_vclk_div1_gate = { + .data = &(struct clk_regmap_gate_data){ +- .offset = HHI_VID_CLK_DIV, ++ .offset = HHI_VID_CLK_CNTL, + .bit_idx = 0, + }, + .hw.init = &(struct clk_init_data){ +@@ -1237,7 +1237,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = { + + static struct clk_regmap meson8b_vclk_div2_div_gate = { + .data = &(struct clk_regmap_gate_data){ +- .offset = HHI_VID_CLK_DIV, ++ .offset = HHI_VID_CLK_CNTL, + .bit_idx = 1, + }, + .hw.init = &(struct clk_init_data){ +@@ -1267,7 +1267,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = { + + static struct clk_regmap meson8b_vclk_div4_div_gate = { + .data = &(struct clk_regmap_gate_data){ +- .offset = HHI_VID_CLK_DIV, ++ .offset = HHI_VID_CLK_CNTL, + .bit_idx = 2, + }, + .hw.init = &(struct clk_init_data){ +@@ -1297,7 +1297,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = { + + static struct clk_regmap meson8b_vclk_div6_div_gate = { + .data = &(struct clk_regmap_gate_data){ +- .offset = HHI_VID_CLK_DIV, ++ .offset = HHI_VID_CLK_CNTL, + .bit_idx = 3, + }, + .hw.init = &(struct clk_init_data){ +@@ -1327,7 +1327,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = { + + static struct clk_regmap meson8b_vclk_div12_div_gate = { + .data = &(struct clk_regmap_gate_data){ +- .offset = HHI_VID_CLK_DIV, ++ .offset = HHI_VID_CLK_CNTL, + .bit_idx = 4, + }, + .hw.init = &(struct clk_init_data){ +@@ -1910,6 +1910,13 @@ static struct clk_regmap meson8b_mali = { + }, + }; + ++static const struct reg_sequence meson8m2_gp_pll_init_regs[] = { ++ { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 }, ++ { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 }, ++ { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 }, ++ { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 }, ++}; ++ + static const struct pll_params_table meson8m2_gp_pll_params_table[] = { + PLL_PARAMS(182, 3), + { /* sentinel */ }, +@@ -1943,6 +1950,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = { + .width = 1, + }, + .table = meson8m2_gp_pll_params_table, ++ .init_regs = meson8m2_gp_pll_init_regs, ++ .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs), + }, + .hw.init = &(struct clk_init_data){ + .name = "gp_pll_dco", +@@ -3491,54 +3500,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { + static const struct meson8b_clk_reset_line { + u32 reg; + u8 bit_idx; ++ bool active_low; + } meson8b_clk_reset_bits[] = { + [CLKC_RESET_L2_CACHE_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 30, ++ .active_low = false, + }, + [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 29, ++ .active_low = false, + }, + 
[CLKC_RESET_SCU_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 28, ++ .active_low = false, + }, + [CLKC_RESET_CPU3_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 27, ++ .active_low = false, + }, + [CLKC_RESET_CPU2_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 26, ++ .active_low = false, + }, + [CLKC_RESET_CPU1_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 25, ++ .active_low = false, + }, + [CLKC_RESET_CPU0_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 24, ++ .active_low = false, + }, + [CLKC_RESET_A5_GLOBAL_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 18, ++ .active_low = false, + }, + [CLKC_RESET_A5_AXI_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 17, ++ .active_low = false, + }, + [CLKC_RESET_A5_ABP_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16 ++ .reg = HHI_SYS_CPU_CLK_CNTL0, ++ .bit_idx = 16, ++ .active_low = false, + }, + [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = { +- .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30 ++ .reg = HHI_SYS_CPU_CLK_CNTL1, ++ .bit_idx = 30, ++ .active_low = false, + }, + [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = { +- .reg = HHI_VID_CLK_CNTL, .bit_idx = 15 ++ .reg = HHI_VID_CLK_CNTL, ++ .bit_idx = 15, ++ .active_low = false, + }, + [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = { +- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7 ++ .reg = HHI_VID_DIVIDER_CNTL, ++ .bit_idx = 7, ++ .active_low = false, + }, + [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = { +- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3 ++ .reg = HHI_VID_DIVIDER_CNTL, ++ .bit_idx = 3, ++ .active_low = false, + }, + [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = { +- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1 ++ .reg = HHI_VID_DIVIDER_CNTL, ++ .bit_idx = 1, ++ .active_low = true, + }, + [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = { +- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0 ++ .reg = HHI_VID_DIVIDER_CNTL, ++ .bit_idx = 0, ++ .active_low = true, + }, + }; + +@@ -3547,22 +3589,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, + { + struct meson8b_clk_reset *meson8b_clk_reset = + container_of(rcdev, struct meson8b_clk_reset, reset); +- unsigned long flags; + const struct meson8b_clk_reset_line *reset; ++ unsigned int value = 0; ++ unsigned long flags; + + if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) + return -EINVAL; + + reset = &meson8b_clk_reset_bits[id]; + ++ if (assert != reset->active_low) ++ value = BIT(reset->bit_idx); ++ + spin_lock_irqsave(&meson_clk_lock, flags); + +- if (assert) +- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, +- BIT(reset->bit_idx), BIT(reset->bit_idx)); +- else +- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, +- BIT(reset->bit_idx), 0); ++ regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, ++ BIT(reset->bit_idx), value); + + spin_unlock_irqrestore(&meson_clk_lock, flags); + +diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h +index c889fbeec30f..c91fb07fcb65 100644 +--- a/drivers/clk/meson/meson8b.h ++++ b/drivers/clk/meson/meson8b.h +@@ -20,6 +20,10 @@ + * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf + */ + 
#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ ++#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ ++#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ ++#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */ ++#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */ + #define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ + #define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ + #define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c +index 4e329a7baf2b..17e4a5a2a9fd 100644 +--- a/drivers/clk/qcom/gcc-msm8916.c ++++ b/drivers/clk/qcom/gcc-msm8916.c +@@ -260,7 +260,7 @@ static struct clk_pll gpll0 = { + .l_reg = 0x21004, + .m_reg = 0x21008, + .n_reg = 0x2100c, +- .config_reg = 0x21014, ++ .config_reg = 0x21010, + .mode_reg = 0x21000, + .status_reg = 0x2101c, + .status_bit = 17, +@@ -287,7 +287,7 @@ static struct clk_pll gpll1 = { + .l_reg = 0x20004, + .m_reg = 0x20008, + .n_reg = 0x2000c, +- .config_reg = 0x20014, ++ .config_reg = 0x20010, + .mode_reg = 0x20000, + .status_reg = 0x2001c, + .status_bit = 17, +@@ -314,7 +314,7 @@ static struct clk_pll gpll2 = { + .l_reg = 0x4a004, + .m_reg = 0x4a008, + .n_reg = 0x4a00c, +- .config_reg = 0x4a014, ++ .config_reg = 0x4a010, + .mode_reg = 0x4a000, + .status_reg = 0x4a01c, + .status_bit = 17, +@@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = { + .l_reg = 0x23004, + .m_reg = 0x23008, + .n_reg = 0x2300c, +- .config_reg = 0x23014, ++ .config_reg = 0x23010, + .mode_reg = 0x23000, + .status_reg = 0x2301c, + .status_bit = 17, +diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c +index 132cc96895e3..6f9612c169af 100644 +--- a/drivers/clk/renesas/renesas-cpg-mssr.c ++++ b/drivers/clk/renesas/renesas-cpg-mssr.c +@@ -800,7 +800,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev) + /* Save module registers with bits under our control */ + for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) { + if (priv->smstpcr_saved[reg].mask) +- priv->smstpcr_saved[reg].val = ++ priv->smstpcr_saved[reg].val = priv->stbyctrl ? ++ readb(priv->base + STBCR(reg)) : + readl(priv->base + SMSTPCR(reg)); + } + +@@ -860,8 +861,9 @@ static int cpg_mssr_resume_noirq(struct device *dev) + } + + if (!i) +- dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n", +- priv->base + SMSTPCR(reg), oldval & mask); ++ dev_warn(dev, "Failed to enable %s%u[0x%x]\n", ++ priv->stbyctrl ? 
"STB" : "SMSTP", reg, ++ oldval & mask); + } + + return 0; +diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c +index 27fd274e92f8..dfef5f0833db 100644 +--- a/drivers/clk/samsung/clk-exynos5420.c ++++ b/drivers/clk/samsung/clk-exynos5420.c +@@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = { + + static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { + GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", +- GATE_BUS_TOP, 24, 0, 0), ++ GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0), + GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", + GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), + }; +@@ -940,25 +940,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { + GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", + GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", +- GATE_BUS_TOP, 5, 0, 0), ++ GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0), + GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", + GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), + GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", + GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", +- GATE_BUS_TOP, 8, 0, 0), ++ GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0), + GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio", + GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", + GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk266_isp", "mout_user_aclk266_isp", +- GATE_BUS_TOP, 13, 0, 0), ++ GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0), + GATE(0, "aclk166", "mout_user_aclk166", + GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", + GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), + GATE(0, "aclk400_isp", "mout_user_aclk400_isp", +- GATE_BUS_TOP, 16, 0, 0), ++ GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0), + GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", + GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0), + GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", +@@ -1158,8 +1158,10 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { + GATE_IP_GSCL1, 3, 0, 0), + GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333", + GATE_IP_GSCL1, 4, 0, 0), +- GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0), +- GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0), ++ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, ++ CLK_IS_CRITICAL, 0), ++ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, ++ CLK_IS_CRITICAL, 0), + GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333", + GATE_IP_GSCL1, 16, 0, 0), + GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", +diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c +index 4b1aa9382ad2..6f29ecd0442e 100644 +--- a/drivers/clk/samsung/clk-exynos5433.c ++++ b/drivers/clk/samsung/clk-exynos5433.c +@@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { + GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", + ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", +- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), ++ ENABLE_SCLK_PERIC, 6, ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), + GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, + 5, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1, "sclk_spi1", 
"sclk_spi1_peric", ENABLE_SCLK_PERIC, +diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c +index 640270f51aa5..eb8862752c2b 100644 +--- a/drivers/clk/sprd/pll.c ++++ b/drivers/clk/sprd/pll.c +@@ -105,7 +105,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, + + cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL); + if (!cfg) +- return -ENOMEM; ++ return parent_rate; + + for (i = 0; i < regs_num; i++) + cfg[i] = sprd_pll_read(pll, i); +diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c +index 4413b6e04a8e..55873d4b7603 100644 +--- a/drivers/clk/st/clk-flexgen.c ++++ b/drivers/clk/st/clk-flexgen.c +@@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) + break; + } + ++ flex_flags &= ~CLK_IS_CRITICAL; + of_clk_detect_critical(np, i, &flex_flags); + + /* +diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c +index 27201fd26e44..e1aa1fbac48a 100644 +--- a/drivers/clk/sunxi/clk-sunxi.c ++++ b/drivers/clk/sunxi/clk-sunxi.c +@@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) + * Round down the frequency to the closest multiple of either + * 6 or 16 + */ +- u32 round_freq_6 = round_down(freq_mhz, 6); ++ u32 round_freq_6 = rounddown(freq_mhz, 6); + u32 round_freq_16 = round_down(freq_mhz, 16); + + if (round_freq_6 > round_freq_16) +diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c +index 6a89936ba03a..eaa43575cfa5 100644 +--- a/drivers/clk/ti/composite.c ++++ b/drivers/clk/ti/composite.c +@@ -196,6 +196,7 @@ cleanup: + if (!cclk->comp_clks[i]) + continue; + list_del(&cclk->comp_clks[i]->link); ++ kfree(cclk->comp_clks[i]->parent_names); + kfree(cclk->comp_clks[i]); + } + +diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c +index a11f93ecbf34..6f057ab9df03 100644 +--- a/drivers/clk/zynqmp/clkc.c ++++ b/drivers/clk/zynqmp/clkc.c +@@ -558,7 +558,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, + { + int j; + u32 num_nodes, clk_dev_id; +- char *clk_out = NULL; ++ char *clk_out[MAX_NODES]; + struct clock_topology *nodes; + struct clk_hw *hw = NULL; + +@@ -572,16 +572,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, + * Intermediate clock names are postfixed with type of clock. 
+ */ + if (j != (num_nodes - 1)) { +- clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name, ++ clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name, + clk_type_postfix[nodes[j].type]); + } else { +- clk_out = kasprintf(GFP_KERNEL, "%s", clk_name); ++ clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name); + } + + if (!clk_topology[nodes[j].type]) + continue; + +- hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id, ++ hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id, + parent_names, + num_parents, + &nodes[j]); +@@ -590,9 +590,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, + __func__, clk_dev_id, clk_name, + PTR_ERR(hw)); + +- parent_names[0] = clk_out; ++ parent_names[0] = clk_out[j]; + } +- kfree(clk_out); ++ ++ for (j = 0; j < num_nodes; j++) ++ kfree(clk_out[j]); ++ + return hw; + } + +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c +index ac80bc6af093..aba5db3c0588 100644 +--- a/drivers/crypto/omap-sham.c ++++ b/drivers/crypto/omap-sham.c +@@ -165,8 +165,6 @@ struct omap_sham_hmac_ctx { + }; + + struct omap_sham_ctx { +- struct omap_sham_dev *dd; +- + unsigned long flags; + + /* fallback stuff */ +@@ -918,27 +916,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) + return 0; + } + ++struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) ++{ ++ struct omap_sham_dev *dd; ++ ++ if (ctx->dd) ++ return ctx->dd; ++ ++ spin_lock_bh(&sham.lock); ++ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); ++ list_move_tail(&dd->list, &sham.dev_list); ++ ctx->dd = dd; ++ spin_unlock_bh(&sham.lock); ++ ++ return dd; ++} ++ + static int omap_sham_init(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = NULL, *tmp; ++ struct omap_sham_dev *dd; + int bs = 0; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); ++ ctx->dd = NULL; + +- ctx->dd = dd; ++ dd = omap_sham_find_dev(ctx); ++ if (!dd) ++ return -ENODEV; + + ctx->flags = 0; + +@@ -1187,8 +1193,7 @@ err1: + static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); +- struct omap_sham_dev *dd = tctx->dd; ++ struct omap_sham_dev *dd = ctx->dd; + + ctx->op = op; + +@@ -1198,7 +1203,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + static int omap_sham_update(struct ahash_request *req) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = ctx->dd; ++ struct omap_sham_dev *dd = omap_sham_find_dev(ctx); + + if (!req->nbytes) + return 0; +@@ -1302,21 +1307,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs = crypto_shash_blocksize(bctx->shash); + int ds = crypto_shash_digestsize(bctx->shash); +- struct omap_sham_dev *dd = NULL, *tmp; + int err, i; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); +- + err = crypto_shash_setkey(tctx->fallback, key, keylen); + if (err) + 
return err; +@@ -1334,7 +1326,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + + memset(bctx->ipad + keylen, 0, bs - keylen); + +- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { ++ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { + memcpy(bctx->opad, bctx->ipad, bs); + + for (i = 0; i < bs; i++) { +@@ -2136,6 +2128,7 @@ static int omap_sham_probe(struct platform_device *pdev) + } + + dd->flags |= dd->pdata->flags; ++ sham.flags |= dd->pdata->flags; + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); +@@ -2163,6 +2156,9 @@ static int omap_sham_probe(struct platform_device *pdev) + spin_unlock(&sham.lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { ++ if (dd->pdata->algs_info[i].registered) ++ break; ++ + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + struct ahash_alg *alg; + +@@ -2214,9 +2210,11 @@ static int omap_sham_remove(struct platform_device *pdev) + list_del(&dd->list); + spin_unlock(&sham.lock); + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) +- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) ++ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { + crypto_unregister_ahash( + &dd->pdata->algs_info[i].algs_list[j]); ++ dd->pdata->algs_info[i].registered--; ++ } + tasklet_kill(&dd->done_task); + pm_runtime_disable(&pdev->dev); + +diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c +index ad02dc6747a4..0317b614b680 100644 +--- a/drivers/extcon/extcon-adc-jack.c ++++ b/drivers/extcon/extcon-adc-jack.c +@@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev) + for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); + data->num_conditions = i; + +- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); ++ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); + if (IS_ERR(data->chan)) + return PTR_ERR(data->chan); + +@@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev) + + free_irq(data->irq, data); + cancel_work_sync(&data->handler.work); +- iio_channel_release(data->chan); + + return 0; + } +diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c +index e48d971ffb61..a3b11bc71dcb 100644 +--- a/drivers/firmware/imx/imx-scu.c ++++ b/drivers/firmware/imx/imx-scu.c +@@ -300,6 +300,7 @@ static int imx_scu_probe(struct platform_device *pdev) + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to request mbox chan %s ret %d\n", + chan_name, ret); ++ kfree(chan_name); + return ret; + } + +diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c +index 4802ab170fe5..b9fdc20b4eb9 100644 +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -9,7 +9,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -441,8 +440,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + struct qcom_scm_mem_map_info *mem_to_map; + phys_addr_t mem_to_map_phys; + phys_addr_t dest_phys; +- phys_addr_t ptr_phys; +- dma_addr_t ptr_dma; ++ dma_addr_t ptr_phys; + size_t mem_to_map_sz; + size_t dest_sz; + size_t src_sz; +@@ -459,10 +457,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + + ALIGN(dest_sz, SZ_64); + +- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); ++ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + if (!ptr) + return -ENOMEM; +- ptr_phys = dma_to_phys(__scm->dev, 
ptr_dma); + + /* Fill source vmid detail */ + src = ptr; +@@ -490,7 +487,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + + ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, + ptr_phys, src_sz, dest_phys, dest_sz); +- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); ++ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); + if (ret) { + dev_err(__scm->dev, + "Assign memory protection call failed %d\n", ret); +diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c +index 62f924489db5..5942343a5d6e 100644 +--- a/drivers/fpga/dfl-afu-dma-region.c ++++ b/drivers/fpga/dfl-afu-dma-region.c +@@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, + region->pages); + if (pinned < 0) { + ret = pinned; +- goto put_pages; ++ goto free_pages; + } else if (pinned != npages) { + ret = -EFAULT; +- goto free_pages; ++ goto put_pages; + } + + dev_dbg(dev, "%d pages pinned\n", pinned); +diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c +index 92e127e74813..ed6061b5cca1 100644 +--- a/drivers/gpio/gpio-dwapb.c ++++ b/drivers/gpio/gpio-dwapb.c +@@ -49,7 +49,9 @@ + #define GPIO_EXT_PORTC 0x58 + #define GPIO_EXT_PORTD 0x5c + ++#define DWAPB_DRIVER_NAME "gpio-dwapb" + #define DWAPB_MAX_PORTS 4 ++ + #define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ + #define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ + #define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ +@@ -398,7 +400,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, +- "gpio-dwapb", handle_level_irq, ++ DWAPB_DRIVER_NAME, handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { +@@ -455,7 +457,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, + */ + err = devm_request_irq(gpio->dev, pp->irq[0], + dwapb_irq_handler_mfd, +- IRQF_SHARED, "gpio-dwapb-mfd", gpio); ++ IRQF_SHARED, DWAPB_DRIVER_NAME, gpio); + if (err) { + dev_err(gpio->dev, "error requesting IRQ\n"); + irq_domain_remove(gpio->domain); +@@ -533,26 +535,33 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, + dwapb_configure_irqs(gpio, port, pp); + + err = gpiochip_add_data(&port->gc, port); +- if (err) ++ if (err) { + dev_err(gpio->dev, "failed to register gpiochip for port%d\n", + port->idx); +- else +- port->is_registered = true; ++ return err; ++ } + + /* Add GPIO-signaled ACPI event support */ +- if (pp->has_irq) +- acpi_gpiochip_request_interrupts(&port->gc); ++ acpi_gpiochip_request_interrupts(&port->gc); + +- return err; ++ port->is_registered = true; ++ ++ return 0; + } + + static void dwapb_gpio_unregister(struct dwapb_gpio *gpio) + { + unsigned int m; + +- for (m = 0; m < gpio->nr_ports; ++m) +- if (gpio->ports[m].is_registered) +- gpiochip_remove(&gpio->ports[m].gc); ++ for (m = 0; m < gpio->nr_ports; ++m) { ++ struct dwapb_gpio_port *port = &gpio->ports[m]; ++ ++ if (!port->is_registered) ++ continue; ++ ++ acpi_gpiochip_free_interrupts(&port->gc); ++ gpiochip_remove(&port->gc); ++ } + } + + static struct dwapb_platform_data * +@@ -836,7 +845,7 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend, + + static struct platform_driver dwapb_gpio_driver = { + .driver = { +- .name = "gpio-dwapb", ++ .name = DWAPB_DRIVER_NAME, + .pm = &dwapb_gpio_pm_ops, + .of_match_table = of_match_ptr(dwapb_of_match), + .acpi_match_table = ACPI_PTR(dwapb_acpi_match), +@@ -850,3 +859,4 @@ 
module_platform_driver(dwapb_gpio_driver); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Jamie Iles"); + MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver"); ++MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME); +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index 3edc1762803a..29ba26742c8f 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -306,8 +306,22 @@ static const struct regmap_config pca953x_i2c_regmap = { + .volatile_reg = pca953x_volatile_register, + + .cache_type = REGCACHE_RBTREE, +- /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */ +- .max_register = 0xff, ++ .max_register = 0x7f, ++}; ++ ++static const struct regmap_config pca953x_ai_i2c_regmap = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ ++ .read_flag_mask = REG_ADDR_AI, ++ .write_flag_mask = REG_ADDR_AI, ++ ++ .readable_reg = pca953x_readable_register, ++ .writeable_reg = pca953x_writeable_register, ++ .volatile_reg = pca953x_volatile_register, ++ ++ .cache_type = REGCACHE_RBTREE, ++ .max_register = 0x7f, + }; + + static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, +@@ -318,18 +332,6 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, + int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1; + u8 regaddr = pinctrl | addr | (off / BANK_SZ); + +- /* Single byte read doesn't need AI bit set. */ +- if (!addrinc) +- return regaddr; +- +- /* Chips with 24 and more GPIOs always support Auto Increment */ +- if (write && NBANK(chip) > 2) +- regaddr |= REG_ADDR_AI; +- +- /* PCA9575 needs address-increment on multi-byte writes */ +- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) +- regaddr |= REG_ADDR_AI; +- + return regaddr; + } + +@@ -897,6 +899,7 @@ static int pca953x_probe(struct i2c_client *client, + int ret; + u32 invert = 0; + struct regulator *reg; ++ const struct regmap_config *regmap_config; + + chip = devm_kzalloc(&client->dev, + sizeof(struct pca953x_chip), GFP_KERNEL); +@@ -960,7 +963,17 @@ static int pca953x_probe(struct i2c_client *client, + + i2c_set_clientdata(client, chip); + +- chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap); ++ pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); ++ ++ if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { ++ dev_info(&client->dev, "using AI\n"); ++ regmap_config = &pca953x_ai_i2c_regmap; ++ } else { ++ dev_info(&client->dev, "using no AI\n"); ++ regmap_config = &pca953x_i2c_regmap; ++ } ++ ++ chip->regmap = devm_regmap_init_i2c(client, regmap_config); + if (IS_ERR(chip->regmap)) { + ret = PTR_ERR(chip->regmap); + goto err_exit; +@@ -991,7 +1004,6 @@ static int pca953x_probe(struct i2c_client *client, + /* initialize cached registers from their original values. + * we can't share this chip with another i2c master. 
+ */ +- pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); + + if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) { + chip->regs = &pca953x_regs; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 9fd12e108a70..4fad0b603b3a 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -929,7 +929,7 @@ static int dm_late_init(void *handle) + unsigned int linear_lut[16]; + int i; + struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; +- bool ret = false; ++ bool ret; + + for (i = 0; i < 16; i++) + linear_lut[i] = 0xFFFF * i / 15; +@@ -945,13 +945,10 @@ static int dm_late_init(void *handle) + */ + params.min_abm_backlight = 0x28F; + +- /* todo will enable for navi10 */ +- if (adev->asic_type <= CHIP_RAVEN) { +- ret = dmcu_load_iram(dmcu, params); ++ ret = dmcu_load_iram(dmcu, params); + +- if (!ret) +- return -EINVAL; +- } ++ if (!ret) ++ return -EINVAL; + + return detect_mst_link_for_all_connectors(adev->ddev); + } +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +index 221e0f56389f..823843cd2613 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( + + /* Sort voltage table from low to high*/ + if (result == BP_RESULT_OK) { +- struct clock_voltage_caps temp = {0, 0}; + uint32_t i; + uint32_t j; + +@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( + info->disp_clk_voltage[j].max_supported_clk < + info->disp_clk_voltage[j-1].max_supported_clk) { + /* swap j and j - 1*/ +- temp = info->disp_clk_voltage[j-1]; +- info->disp_clk_voltage[j-1] = +- info->disp_clk_voltage[j]; +- info->disp_clk_voltage[j] = temp; ++ swap(info->disp_clk_voltage[j - 1], ++ info->disp_clk_voltage[j]); + } + } + } +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +index dff65c0fe82f..7873abea4112 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +@@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( + + struct atom_common_table_header *header; + struct atom_data_revision revision; +- +- struct clock_voltage_caps temp = {0, 0}; + uint32_t i; + uint32_t j; + +@@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info( + info->disp_clk_voltage[j-1].max_supported_clk + ) { + /* swap j and j - 1*/ +- temp = info->disp_clk_voltage[j-1]; +- info->disp_clk_voltage[j-1] = +- info->disp_clk_voltage[j]; +- info->disp_clk_voltage[j] = temp; ++ swap(info->disp_clk_voltage[j - 1], ++ info->disp_clk_voltage[j]); + } + } + } +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 2028dc017f7a..47e7d11ca0c9 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -907,15 +907,11 @@ static void program_timing_sync( + + /* set first pipe with plane as master */ + for (j = 0; j < group_size; j++) { +- struct pipe_ctx *temp; +- + if (pipe_set[j]->plane_state) { + if (j == 0) + break; + +- temp = pipe_set[0]; +- pipe_set[0] = pipe_set[j]; +- pipe_set[j] = temp; ++ swap(pipe_set[0], pipe_set[j]); + break; + } + } +@@ -2230,6 +2226,12 @@ void dc_commit_updates_for_stream(struct dc *dc, + + 
copy_stream_update_to_stream(dc, context, stream, stream_update); + ++ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { ++ DC_ERROR("Mode validation failed for stream update!\n"); ++ dc_release_state(context); ++ return; ++ } ++ + commit_planes_for_stream( + dc, + srf_updates, +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +index 2d8f14b69117..207435fa4f2c 100644 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +@@ -799,7 +799,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma, + pow_buffer_ptr = -1; // reset back to no optimize + ret = true; + release: +- kfree(coeff); ++ kvfree(coeff); + return ret; + } + +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +index 15590fd86ef4..2e71ca3e19f5 100644 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +@@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) + + switch (dev_id) { + case 0x67BA: +- case 0x66B1: ++ case 0x67B1: + smu_data->power_tune_defaults = &defaults_hawaii_pro; + break; + case 0x67B8: +diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c +index 4c766624b20d..2337b3827e6a 100644 +--- a/drivers/gpu/drm/drm_connector.c ++++ b/drivers/gpu/drm/drm_connector.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + #include + +@@ -511,6 +512,10 @@ int drm_connector_register(struct drm_connector *connector) + drm_mode_object_register(connector->dev, &connector->base); + + connector->registration_state = DRM_CONNECTOR_REGISTERED; ++ ++ /* Let userspace know we have a new connector */ ++ drm_sysfs_hotplug_event(connector->dev); ++ + goto unlock; + + err_debugfs: +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index 4b7aaad07423..006d6087700f 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -3498,6 +3499,17 @@ fail: + return ret; + } + ++static int do_get_act_status(struct drm_dp_aux *aux) ++{ ++ int ret; ++ u8 status; ++ ++ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); ++ if (ret < 0) ++ return ret; ++ ++ return status; ++} + + /** + * drm_dp_check_act_status() - Check ACT handled status. +@@ -3507,33 +3519,29 @@ fail: + */ + int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) + { +- u8 status; +- int ret; +- int count = 0; +- +- do { +- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); +- +- if (ret < 0) { +- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); +- goto fail; +- } +- +- if (status & DP_PAYLOAD_ACT_HANDLED) +- break; +- count++; +- udelay(100); +- +- } while (count < 30); +- +- if (!(status & DP_PAYLOAD_ACT_HANDLED)) { +- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); +- ret = -EINVAL; +- goto fail; ++ /* ++ * There doesn't seem to be any recommended retry count or timeout in ++ * the MST specification. Since some hubs have been observed to take ++ * over 1 second to update their payload allocations under certain ++ * conditions, we use a rather large timeout value. 
++ */ ++ const int timeout_ms = 3000; ++ int ret, status; ++ ++ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, ++ status & DP_PAYLOAD_ACT_HANDLED || status < 0, ++ 200, timeout_ms * USEC_PER_MSEC); ++ if (ret < 0 && status >= 0) { ++ DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", ++ timeout_ms, status); ++ return -EINVAL; ++ } else if (status < 0) { ++ DRM_DEBUG_KMS("Failed to read payload table status: %d\n", ++ status); ++ return status; + } ++ + return 0; +-fail: +- return ret; + } + EXPORT_SYMBOL(drm_dp_check_act_status); + +diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c +index cf804389f5ec..d50a7884e69e 100644 +--- a/drivers/gpu/drm/drm_encoder_slave.c ++++ b/drivers/gpu/drm/drm_encoder_slave.c +@@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + err = encoder_drv->encoder_init(client, dev, encoder); + if (err) +- goto fail_unregister; ++ goto fail_module_put; + + if (info->platform_data) + encoder->slave_funcs->set_config(&encoder->base, +@@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + return 0; + ++fail_module_put: ++ module_put(module); + fail_unregister: + i2c_unregister_device(client); +- module_put(module); + fail: + return err; + } +diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c +index dd2bc85f43cc..4fd2f6cd03c1 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -293,9 +293,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector) + return PTR_ERR(connector->kdev); + } + +- /* Let userspace know we have a new connector */ +- drm_sysfs_hotplug_event(dev); +- + if (connector->ddc) + return sysfs_create_link(&connector->kdev->kobj, + &connector->ddc->dev.kobj, "ddc"); +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 4ab6531a4a74..2efc317c90df 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -1292,8 +1292,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, + bool is_tc_port = intel_phy_is_tc(i915, phy); + i915_reg_t ch_ctl, ch_data[5]; + u32 aux_clock_divider; +- enum intel_display_power_domain aux_domain = +- intel_aux_power_domain(intel_dig_port); ++ enum intel_display_power_domain aux_domain; + intel_wakeref_t aux_wakeref; + intel_wakeref_t pps_wakeref; + int i, ret, recv_bytes; +@@ -1308,6 +1307,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, + if (is_tc_port) + intel_tc_port_lock(intel_dig_port); + ++ aux_domain = intel_aux_power_domain(intel_dig_port); ++ + aux_wakeref = intel_display_power_get(i915, aux_domain); + pps_wakeref = pps_lock(intel_dp); + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +index 4c4954e8ce0a..3f875aebbd23 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +@@ -36,7 +36,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) + unsigned long last_pfn = 0; /* suppress gcc warning */ + unsigned int max_segment = i915_sg_segment_size(); + unsigned int sg_page_sizes; +- struct pagevec pvec; + gfp_t noreclaim; + int ret; + +@@ -188,13 +187,17 @@ err_sg: + sg_mark_end(sg); + err_pages: + mapping_clear_unevictable(mapping); +- pagevec_init(&pvec); +- for_each_sgt_page(page, sgt_iter, st) { +- if (!pagevec_add(&pvec, page)) ++ if (sg != st->sgl) { ++ struct pagevec pvec; ++ ++ pagevec_init(&pvec); ++ for_each_sgt_page(page, sgt_iter, st) { ++ if 
(!pagevec_add(&pvec, page)) ++ check_release_pagevec(&pvec); ++ } ++ if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + } +- if (pagevec_count(&pvec)) +- check_release_pagevec(&pvec); + sg_free_table(st); + kfree(st); + +diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c +index f24096e27bef..21417ac8e878 100644 +--- a/drivers/gpu/drm/i915/i915_cmd_parser.c ++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c +@@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor { + #define REG32(_reg, ...) \ + { .addr = (_reg), __VA_ARGS__ } + ++#define REG32_IDX(_reg, idx) \ ++ { .addr = _reg(idx) } ++ + /* + * Convenience macro for adding 64-bit registers. + * +@@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), ++ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 37e3dd3c1a9d..4193a9970251 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -3500,6 +3500,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) + + val = I915_READ(GEN11_DE_HPD_IMR); + val &= ~hotplug_irqs; ++ val |= ~enabled_irqs & hotplug_irqs; + I915_WRITE(GEN11_DE_HPD_IMR, val); + POSTING_READ(GEN11_DE_HPD_IMR); + +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +index 99cd6e62a971..7829247de60e 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +@@ -1359,6 +1359,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) + { + u64 busy_cycles, busy_time; + ++ /* Only read the gpu busy if the hardware is already active */ ++ if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) ++ return 0; ++ + busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, + REG_A5XX_RBBM_PERFCTR_RBBM_0_HI); + +@@ -1367,6 +1371,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) + + gpu->devfreq.busy_cycles = busy_cycles; + ++ pm_runtime_put(&gpu->pdev->dev); ++ + if (WARN_ON(busy_time > ~0LU)) + return ~0LU; + +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +index 85f14feafdec..e62b286947a7 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +@@ -107,6 +107,13 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) + struct msm_gpu *gpu = &adreno_gpu->base; + int ret; + ++ /* ++ * This can get called from devfreq while the hardware is idle. Don't ++ * bring up the power if it isn't already active ++ */ ++ if (pm_runtime_get_if_in_use(gmu->dev) == 0) ++ return; ++ + gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); + + gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, +@@ -133,6 +140,7 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) + * for now leave it at max so that the performance is nominal. 
+ */ + icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); ++ pm_runtime_put(gmu->dev); + } + + void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 686c34d706b0..be68d4e6551c 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -803,6 +803,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u64 busy_cycles, busy_time; + ++ ++ /* Only read the gpu busy if the hardware is already active */ ++ if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0) ++ return 0; ++ + busy_cycles = gmu_read64(&a6xx_gpu->gmu, + REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, + REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); +@@ -812,6 +817,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) + + gpu->devfreq.busy_cycles = busy_cycles; + ++ pm_runtime_put(a6xx_gpu->gmu.dev); ++ + if (WARN_ON(busy_time > ~0LU)) + return ~0LU; + +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +index 91cd76a2bab1..77823ccdd0f8 100644 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +@@ -1037,7 +1037,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) + + return 0; + fail: +- mdp5_destroy(pdev); ++ if (mdp5_kms) ++ mdp5_destroy(pdev); + return ret; + } + +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c +index 9b16a08eb4d9..bf6d41fb0c9f 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c +@@ -27,10 +27,10 @@ void + gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc) + { + struct nvkm_device *device = ior->disp->engine.subdev.device; +- const u32 hoff = head * 0x800; ++ const u32 soff = nv50_ior_base(ior); + const u32 ctrl = scdc & 0x3; + +- nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl); ++ nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl); + + ior->tmds.high_speed = !!(scdc & 0x2); + } +diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c +index bfc1631093e9..9bdbe0db8795 100644 +--- a/drivers/gpu/drm/qxl/qxl_kms.c ++++ b/drivers/gpu/drm/qxl/qxl_kms.c +@@ -218,7 +218,7 @@ int qxl_device_init(struct qxl_device *qdev, + &(qdev->ram_header->cursor_ring_hdr), + sizeof(struct qxl_command), + QXL_CURSOR_RING_SIZE, +- qdev->io_base + QXL_IO_NOTIFY_CMD, ++ qdev->io_base + QXL_IO_NOTIFY_CURSOR, + false, + &qdev->cursor_event); + +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h +index 7ad3f06c127e..00ca35f07ba5 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h +@@ -148,7 +148,7 @@ + #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 + + #define SUN4I_HDMI_DDC_CLK_REG 0x528 +-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) ++#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3) + #define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) + + #define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540 +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +index 2ff780114106..12430b9d4e93 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, + unsigned long best_rate = 0; + u8 best_m = 0, best_n = 0, _m, 
_n; + +- for (_m = 0; _m < 8; _m++) { ++ for (_m = 0; _m < 16; _m++) { + for (_n = 0; _n < 8; _n++) { + unsigned long tmp_rate; + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 13b7222ef2c9..c552a6bc627e 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -1147,6 +1147,9 @@ + #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882 + #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883 + ++#define USB_VENDOR_ID_TRUST 0x145f ++#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212 ++ + #define USB_VENDOR_ID_TURBOX 0x062a + #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 + #define USB_DEVICE_ID_ASUS_MD_5110 0x5110 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 90ec2390ef68..168fdaa1999f 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -168,6 +168,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE }, + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, +diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +index aa2dbed30fc3..6cf59fd26ad7 100644 +--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c ++++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +@@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, + sizeof(ldr_xfer_query_resp)); + if (rv < 0) { + client_data->flag_retry = true; ++ *fw_info = (struct shim_fw_info){}; + return rv; + } + +@@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, + "data size %d is not equal to size of loader_xfer_query_response %zu\n", + rv, sizeof(struct loader_xfer_query_response)); + client_data->flag_retry = true; ++ *fw_info = (struct shim_fw_info){}; + return -EMSGSIZE; + } + +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c +index d0cc3985b72a..36cce2bfb744 100644 +--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c ++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c +@@ -596,13 +596,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) + goto out; + } + +- /* There is no point in reading a TMC in HW FIFO mode */ +- mode = readl_relaxed(drvdata->base + TMC_MODE); +- if (mode != TMC_MODE_CIRCULAR_BUFFER) { +- ret = -EINVAL; +- goto out; +- } +- + /* Don't interfere if operated from Perf */ + if (drvdata->mode == CS_MODE_PERF) { + ret = -EINVAL; +@@ -616,8 +609,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) + } + + /* Disable the TMC if need be */ +- if (drvdata->mode == CS_MODE_SYSFS) ++ if (drvdata->mode == CS_MODE_SYSFS) { ++ /* There is no point in reading a TMC in HW FIFO mode */ ++ mode = readl_relaxed(drvdata->base + TMC_MODE); ++ if (mode != TMC_MODE_CIRCULAR_BUFFER) { ++ ret = -EINVAL; ++ goto out; ++ } + __tmc_etb_disable_hw(drvdata); ++ } + + drvdata->reading = true; + out: +diff --git 
a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c +index 8382eb64b424..d6c17506dba4 100644 +--- a/drivers/i2c/busses/i2c-icy.c ++++ b/drivers/i2c/busses/i2c-icy.c +@@ -43,6 +43,7 @@ + #include + #include + ++#include + #include + #include + +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c +index 30ded6422e7b..69740a4ff1db 100644 +--- a/drivers/i2c/busses/i2c-piix4.c ++++ b/drivers/i2c/busses/i2c-piix4.c +@@ -977,7 +977,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + } + + if (dev->vendor == PCI_VENDOR_ID_AMD && +- dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { ++ (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || ++ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { + retval = piix4_setup_sb800(dev, id, 1); + } + +diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c +index 2c3c3d6935c0..d0c557c8d80f 100644 +--- a/drivers/i2c/busses/i2c-pxa.c ++++ b/drivers/i2c/busses/i2c-pxa.c +@@ -312,11 +312,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) + dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", + readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), + readl(_ISR(i2c))); +- dev_dbg(dev, "log: "); ++ dev_err(dev, "log:"); + for (i = 0; i < i2c->irqlogidx; i++) +- pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); +- +- pr_debug("\n"); ++ pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); ++ pr_cont("\n"); + } + + #else /* ifdef DEBUG */ +@@ -706,11 +705,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) + { + u32 icr; + +- /* +- * Clear the STOP and ACK flags +- */ ++ /* Clear the START, STOP, ACK, TB and MA flags */ + icr = readl(_ICR(i2c)); +- icr &= ~(ICR_STOP | ICR_ACKNAK); ++ icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); + writel(icr, _ICR(i2c)); + } + +diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c +index 8d0f15f27dc5..0a95afaa48fe 100644 +--- a/drivers/iio/pressure/bmp280-core.c ++++ b/drivers/iio/pressure/bmp280-core.c +@@ -264,6 +264,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + + (s32)2097152) * calib->H2 + 8192) >> 14); + var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4; + ++ var = clamp_val(var, 0, 419430400); ++ + return var >> 12; + }; + +@@ -706,7 +708,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) + unsigned int ctrl; + + if (data->use_eoc) +- init_completion(&data->done); ++ reinit_completion(&data->done); + + ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); + if (ret) +@@ -962,6 +964,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, + "trying to enforce it\n"); + irq_trig = IRQF_TRIGGER_RISING; + } ++ ++ init_completion(&data->done); ++ + ret = devm_request_threaded_irq(dev, + irq, + bmp085_eoc_irq, +diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c +index 8b0b5ae22e4c..726e70b68249 100644 +--- a/drivers/infiniband/core/cma_configfs.c ++++ b/drivers/infiniband/core/cma_configfs.c +@@ -322,8 +322,21 @@ fail: + return ERR_PTR(err); + } + ++static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) ++{ ++ struct config_group *group = ++ container_of(item, struct config_group, cg_item); ++ struct cma_dev_group *cma_dev_group = ++ container_of(group, struct cma_dev_group, device_group); ++ ++ configfs_remove_default_groups(&cma_dev_group->ports_group); ++ 
configfs_remove_default_groups(&cma_dev_group->device_group); ++ config_item_put(item); ++} ++ + static struct configfs_group_operations cma_subsys_group_ops = { + .make_group = make_cma_dev, ++ .drop_item = drop_cma_dev, + }; + + static const struct config_item_type cma_subsys_type = { +diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c +index 7a50cedcef1f..091cca9d88ed 100644 +--- a/drivers/infiniband/core/sysfs.c ++++ b/drivers/infiniband/core/sysfs.c +@@ -1060,8 +1060,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) + coredev->ports_kobj, + "%d", port_num); + if (ret) { +- kfree(p); +- return ret; ++ goto err_put; + } + + p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL); +@@ -1074,8 +1073,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) + ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type, + &p->kobj, "gid_attrs"); + if (ret) { +- kfree(p->gid_attr_group); +- goto err_put; ++ goto err_put_gid_attrs; + } + + if (device->ops.process_mad && is_full_dev) { +@@ -1406,8 +1404,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num, + + ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s", + name); +- if (ret) ++ if (ret) { ++ kobject_put(kobj); + return ret; ++ } + } + + return 0; +diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c +index 599340c1f0b8..541dbcf22d0e 100644 +--- a/drivers/infiniband/hw/cxgb4/device.c ++++ b/drivers/infiniband/hw/cxgb4/device.c +@@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx) + static void c4iw_remove(struct uld_ctx *ctx) + { + pr_debug("c4iw_dev %p\n", ctx->dev); ++ debugfs_remove_recursive(ctx->dev->debugfs_root); + c4iw_unregister_device(ctx->dev); + c4iw_dealloc(ctx); + } +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +index 4540b00ccee9..0502c90c83ed 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +@@ -1349,34 +1349,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) + static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) + { + struct hns_roce_pf_timer_res_a *req_a; +- struct hns_roce_cmq_desc desc[2]; +- int ret, i; ++ struct hns_roce_cmq_desc desc; ++ int ret; + +- for (i = 0; i < 2; i++) { +- hns_roce_cmq_setup_basic_desc(&desc[i], +- HNS_ROCE_OPC_QUERY_PF_TIMER_RES, +- true); ++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES, ++ true); + +- if (i == 0) +- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); +- else +- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); +- } +- +- ret = hns_roce_cmq_send(hr_dev, desc, 2); ++ ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + +- req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data; ++ req_a = (struct hns_roce_pf_timer_res_a *)desc.data; + + hr_dev->caps.qpc_timer_bt_num = +- roce_get_field(req_a->qpc_timer_bt_idx_num, +- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, +- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); ++ roce_get_field(req_a->qpc_timer_bt_idx_num, ++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, ++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); + hr_dev->caps.cqc_timer_bt_num = +- roce_get_field(req_a->cqc_timer_bt_idx_num, +- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, +- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); ++ roce_get_field(req_a->cqc_timer_bt_idx_num, ++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, ++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); + + return 0; + } +@@ 
-4564,7 +4556,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, + qp_attr->path_mig_state = IB_MIG_ARMED; + qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; + if (hr_qp->ibqp.qp_type == IB_QPT_UD) +- qp_attr->qkey = V2_QKEY_VAL; ++ qp_attr->qkey = le32_to_cpu(context.qkey_xrcd); + + qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn, + V2_QPC_BYTE_108_RX_REQ_EPSN_M, +diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c +index d609f4659afb..fd75a9043bf1 100644 +--- a/drivers/infiniband/hw/mlx5/devx.c ++++ b/drivers/infiniband/hw/mlx5/devx.c +@@ -489,6 +489,10 @@ static u64 devx_get_obj_id(const void *in) + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, + MLX5_GET(rst2init_qp_in, in, qpn)); + break; ++ case MLX5_CMD_OP_INIT2INIT_QP: ++ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, ++ MLX5_GET(init2init_qp_in, in, qpn)); ++ break; + case MLX5_CMD_OP_INIT2RTR_QP: + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, + MLX5_GET(init2rtr_qp_in, in, qpn)); +@@ -814,6 +818,7 @@ static bool devx_is_obj_modify_cmd(const void *in) + case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: + case MLX5_CMD_OP_RST2INIT_QP: + case MLX5_CMD_OP_INIT2RTR_QP: ++ case MLX5_CMD_OP_INIT2INIT_QP: + case MLX5_CMD_OP_RTR2RTS_QP: + case MLX5_CMD_OP_RTS2RTS_QP: + case MLX5_CMD_OP_SQERR2RTS_QP: +diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c +index 4e7fde86c96b..c29c1f7da4a1 100644 +--- a/drivers/infiniband/hw/mlx5/srq.c ++++ b/drivers/infiniband/hw/mlx5/srq.c +@@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, + srq->msrq.event = mlx5_ib_srq_event; + srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; + +- if (udata) +- if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { ++ if (udata) { ++ struct mlx5_ib_create_srq_resp resp = { ++ .srqn = srq->msrq.srqn, ++ }; ++ ++ if (ib_copy_to_udata(udata, &resp, min(udata->outlen, ++ sizeof(resp)))) { + mlx5_ib_dbg(dev, "copy to user failed\n"); + err = -EFAULT; + goto err_core; + } ++ } + + init_attr->attr.max_wr = srq->msrq.max - 1; + +diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h +deleted file mode 100644 +index 391f94d9e47d..000000000000 +--- a/drivers/input/serio/i8042-ppcio.h ++++ /dev/null +@@ -1,57 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-#ifndef _I8042_PPCIO_H +-#define _I8042_PPCIO_H +- +- +-#if defined(CONFIG_WALNUT) +- +-#define I8042_KBD_IRQ 25 +-#define I8042_AUX_IRQ 26 +- +-#define I8042_KBD_PHYS_DESC "walnutps2/serio0" +-#define I8042_AUX_PHYS_DESC "walnutps2/serio1" +-#define I8042_MUX_PHYS_DESC "walnutps2/serio%d" +- +-extern void *kb_cs; +-extern void *kb_data; +- +-#define I8042_COMMAND_REG (*(int *)kb_cs) +-#define I8042_DATA_REG (*(int *)kb_data) +- +-static inline int i8042_read_data(void) +-{ +- return readb(kb_data); +-} +- +-static inline int i8042_read_status(void) +-{ +- return readb(kb_cs); +-} +- +-static inline void i8042_write_data(int val) +-{ +- writeb(val, kb_data); +-} +- +-static inline void i8042_write_command(int val) +-{ +- writeb(val, kb_cs); +-} +- +-static inline int i8042_platform_init(void) +-{ +- i8042_reset = I8042_RESET_ALWAYS; +- return 0; +-} +- +-static inline void i8042_platform_exit(void) +-{ +-} +- +-#else +- +-#include "i8042-io.h" +- +-#endif +- +-#endif /* _I8042_PPCIO_H */ +diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h +index 38dc27ad3c18..eb376700dfff 100644 +--- a/drivers/input/serio/i8042.h ++++ b/drivers/input/serio/i8042.h +@@ -17,8 
+17,6 @@ + #include "i8042-ip22io.h" + #elif defined(CONFIG_SNI_RM) + #include "i8042-snirm.h" +-#elif defined(CONFIG_PPC) +-#include "i8042-ppcio.h" + #elif defined(CONFIG_SPARC) + #include "i8042-sparcio.h" + #elif defined(CONFIG_X86) || defined(CONFIG_IA64) +diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c +index 240e8de24cd2..b41b97c962ed 100644 +--- a/drivers/input/touchscreen/edt-ft5x06.c ++++ b/drivers/input/touchscreen/edt-ft5x06.c +@@ -935,19 +935,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, + + error = device_property_read_u32(dev, "offset", &val); + if (!error) { +- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); ++ if (reg_addr->reg_offset != NO_REGISTER) ++ edt_ft5x06_register_write(tsdata, ++ reg_addr->reg_offset, val); + tsdata->offset = val; + } + + error = device_property_read_u32(dev, "offset-x", &val); + if (!error) { +- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val); ++ if (reg_addr->reg_offset_x != NO_REGISTER) ++ edt_ft5x06_register_write(tsdata, ++ reg_addr->reg_offset_x, val); + tsdata->offset_x = val; + } + + error = device_property_read_u32(dev, "offset-y", &val); + if (!error) { +- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val); ++ if (reg_addr->reg_offset_y != NO_REGISTER) ++ edt_ft5x06_register_write(tsdata, ++ reg_addr->reg_offset_y, val); + tsdata->offset_y = val; + } + } +diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c +index 86887c9a349a..f9cc674ba9b7 100644 +--- a/drivers/mailbox/zynqmp-ipi-mailbox.c ++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c +@@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, + mchan->req_buf_size = resource_size(&res); + mchan->req_buf = devm_ioremap(mdev, res.start, + mchan->req_buf_size); +- if (IS_ERR(mchan->req_buf)) { ++ if (!mchan->req_buf) { + dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); +- ret = PTR_ERR(mchan->req_buf); +- return ret; ++ return -ENOMEM; + } + } else if (ret != -ENODEV) { + dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret); +@@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, + mchan->resp_buf_size = resource_size(&res); + mchan->resp_buf = devm_ioremap(mdev, res.start, + mchan->resp_buf_size); +- if (IS_ERR(mchan->resp_buf)) { ++ if (!mchan->resp_buf) { + dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); +- ret = PTR_ERR(mchan->resp_buf); +- return ret; ++ return -ENOMEM; + } + } else if (ret != -ENODEV) { + dev_err(mdev, "Unmatched resource %s.\n", name); +@@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, + mchan->req_buf_size = resource_size(&res); + mchan->req_buf = devm_ioremap(mdev, res.start, + mchan->req_buf_size); +- if (IS_ERR(mchan->req_buf)) { ++ if (!mchan->req_buf) { + dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); +- ret = PTR_ERR(mchan->req_buf); +- return ret; ++ return -ENOMEM; + } + } else if (ret != -ENODEV) { + dev_err(mdev, "Unmatched resource %s.\n", name); +@@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, + mchan->resp_buf_size = resource_size(&res); + mchan->resp_buf = devm_ioremap(mdev, res.start, + mchan->resp_buf_size); +- if (IS_ERR(mchan->resp_buf)) { ++ if (!mchan->resp_buf) { + dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); +- ret = PTR_ERR(mchan->resp_buf); +- return ret; ++ return -ENOMEM; + } + } else if (ret != -ENODEV) { + dev_err(mdev, 
"Unmatched resource %s.\n", name); +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index 46a8b5a91c38..3c1109fceb2f 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -1442,7 +1442,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + if (__set_blocks(n1, n1->keys + n2->keys, + block_bytes(b->c)) > + btree_blocks(new_nodes[i])) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + keys = n2->keys; + /* Take the key of the node we're getting rid of */ +@@ -1471,7 +1471,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + + if (__bch_keylist_realloc(&keylist, + bkey_u64s(&new_nodes[i]->key))) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + bch_btree_node_write(new_nodes[i], &cl); + bch_keylist_add(&keylist, &new_nodes[i]->key); +@@ -1517,6 +1517,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + /* Invalidated our iterator */ + return -EINTR; + ++out_unlock_nocoalesce: ++ for (i = 0; i < nodes; i++) ++ mutex_unlock(&new_nodes[i]->write_lock); ++ + out_nocoalesce: + closure_sync(&cl); + +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c +index 456d790c918c..f2de4c73cc8f 100644 +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -1856,7 +1856,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, + int r; + + current_pgpath = READ_ONCE(m->current_pgpath); +- if (!current_pgpath) ++ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) + current_pgpath = choose_pgpath(m, 0); + + if (current_pgpath) { +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c +index e0a6cf9239f1..e6b0039d07aa 100644 +--- a/drivers/md/dm-zoned-metadata.c ++++ b/drivers/md/dm-zoned-metadata.c +@@ -1589,7 +1589,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) + return dzone; + } + +- return ERR_PTR(-EBUSY); ++ return NULL; + } + + /* +@@ -1609,7 +1609,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) + return zone; + } + +- return ERR_PTR(-EBUSY); ++ return NULL; + } + + /* +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c +index e7ace908a9b7..d50817320e8e 100644 +--- a/drivers/md/dm-zoned-reclaim.c ++++ b/drivers/md/dm-zoned-reclaim.c +@@ -349,8 +349,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) + + /* Get a data zone */ + dzone = dmz_get_zone_for_reclaim(zmd); +- if (IS_ERR(dzone)) +- return PTR_ERR(dzone); ++ if (!dzone) ++ return -EBUSY; + + start = jiffies; + +diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c +index 857991cb3cbb..711979afd90a 100644 +--- a/drivers/mfd/stmfx.c ++++ b/drivers/mfd/stmfx.c +@@ -287,14 +287,21 @@ static int stmfx_irq_init(struct i2c_client *client) + + ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin); + if (ret) +- return ret; ++ goto irq_exit; + + ret = devm_request_threaded_irq(stmfx->dev, client->irq, + NULL, stmfx_irq_handler, + irqtrigger | IRQF_ONESHOT, + "stmfx", stmfx); + if (ret) +- stmfx_irq_exit(client); ++ goto irq_exit; ++ ++ stmfx->irq = client->irq; ++ ++ return 0; ++ ++irq_exit: ++ stmfx_irq_exit(client); + + return ret; + } +@@ -481,6 +488,8 @@ static int stmfx_suspend(struct device *dev) + if (ret) + return ret; + ++ disable_irq(stmfx->irq); ++ + if (stmfx->vdd) + return regulator_disable(stmfx->vdd); + +@@ -501,6 +510,13 @@ static int stmfx_resume(struct device *dev) + } + } + ++ /* Reset STMFX - supply has been stopped during suspend */ ++ ret = 
stmfx_chip_reset(stmfx); ++ if (ret) { ++ dev_err(stmfx->dev, "Failed to reset chip: %d\n", ret); ++ return ret; ++ } ++ + ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL, + &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl)); + if (ret) +@@ -517,6 +533,8 @@ static int stmfx_resume(struct device *dev) + if (ret) + return ret; + ++ enable_irq(stmfx->irq); ++ + return 0; + } + #endif +diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c +index 1e9fe7d92597..737dede4a95c 100644 +--- a/drivers/mfd/wm8994-core.c ++++ b/drivers/mfd/wm8994-core.c +@@ -690,3 +690,4 @@ module_i2c_driver(wm8994_i2c_driver); + MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Mark Brown "); ++MODULE_SOFTDEP("pre: wm8994_regulator"); +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index 842f2210dc7e..3a5d2890fe2a 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -886,6 +886,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, + struct fastrpc_channel_ctx *cctx; + struct fastrpc_user *fl = ctx->fl; + struct fastrpc_msg *msg = &ctx->msg; ++ int ret; + + cctx = fl->cctx; + msg->pid = fl->tgid; +@@ -901,7 +902,13 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, + msg->size = roundup(ctx->msg_sz, PAGE_SIZE); + fastrpc_context_get(ctx); + +- return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); ++ ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); ++ ++ if (ret) ++ fastrpc_context_put(ctx); ++ ++ return ret; ++ + } + + static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, +@@ -1434,8 +1441,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) + domains[domain_id]); + data->miscdev.fops = &fastrpc_fops; + err = misc_register(&data->miscdev); +- if (err) ++ if (err) { ++ kfree(data); + return err; ++ } + + kref_init(&data->refcount); + +diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h +index 75862be53c60..30addffd76f5 100644 +--- a/drivers/misc/habanalabs/habanalabs.h ++++ b/drivers/misc/habanalabs/habanalabs.h +@@ -23,7 +23,7 @@ + + #define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT) + +-#define HL_PENDING_RESET_PER_SEC 5 ++#define HL_PENDING_RESET_PER_SEC 30 + + #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */ + +diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c +index 48ba7e02bed7..d4c14b617201 100644 +--- a/drivers/misc/xilinx_sdfec.c ++++ b/drivers/misc/xilinx_sdfec.c +@@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + const u32 depth) + { + u32 reg = 0; +- u32 res; +- u32 n, i; ++ int res, i, nr_pages; ++ u32 n; + u32 *addr = NULL; +- struct page *page[MAX_NUM_PAGES]; ++ struct page *pages[MAX_NUM_PAGES]; + + /* + * Writes that go beyond the length of +@@ -622,15 +622,22 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) + n += 1; + +- res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page); +- if (res < n) { +- for (i = 0; i < res; i++) +- put_page(page[i]); ++ if (WARN_ON_ONCE(n > INT_MAX)) ++ return -EINVAL; ++ ++ nr_pages = n; ++ ++ res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages); ++ if (res < nr_pages) { ++ if (res > 0) { ++ for (i = 0; i < res; i++) ++ put_page(pages[i]); ++ } + return -EINVAL; + } + +- for (i = 0; i < n; i++) { +- addr = kmap(page[i]); ++ for (i = 0; i < nr_pages; i++) { ++ addr = kmap(pages[i]); + 
do { + xsdfec_regwrite(xsdfec, + base_addr + ((offset + reg) * +@@ -639,7 +646,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + reg++; + } while ((reg < len) && + ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); +- put_page(page[i]); ++ put_page(pages[i]); + } + return reg; + } +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c +index a69c9b9878b7..636966e93517 100644 +--- a/drivers/net/dsa/lantiq_gswip.c ++++ b/drivers/net/dsa/lantiq_gswip.c +@@ -1451,7 +1451,8 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, + + unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +- dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); ++ dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", ++ phy_modes(state->interface), port); + return; + } + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 63ee0c49be7c..b5147bd6cba6 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -9992,7 +9992,7 @@ static void bnxt_timer(struct timer_list *t) + struct bnxt *bp = from_timer(bp, t, timer); + struct net_device *dev = bp->dev; + +- if (!netif_running(dev)) ++ if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) + return; + + if (atomic_read(&bp->intr_sem) != 0) +diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +index cdd7e5da4a74..d375e438d805 100644 +--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c ++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +@@ -235,6 +235,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) + + /* Put it in the ring. */ + p->rx_ring[p->rx_next_fill] = re.d64; ++ /* Make sure there is no reorder of filling the ring and ringing ++ * the bell ++ */ ++ wmb(); ++ + dma_sync_single_for_device(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index 108297a099ed..1ec33c614474 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -6345,11 +6345,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- u32 ctrl, ctrl_ext, rctl, status; +- /* Runtime suspend should only enable wakeup for link changes */ +- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; ++ u32 ctrl, ctrl_ext, rctl, status, wufc; + int retval = 0; + ++ /* Runtime suspend should only enable wakeup for link changes */ ++ if (runtime) ++ wufc = E1000_WUFC_LNKC; ++ else if (device_may_wakeup(&pdev->dev)) ++ wufc = adapter->wol; ++ else ++ wufc = 0; ++ + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; +@@ -6406,7 +6412,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if (hw->mac.type >= e1000_pch_lpt) { +- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) ++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. 
+ */ +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h +index bd1b1ed323f4..6b9117a350fa 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf.h ++++ b/drivers/net/ethernet/intel/iavf/iavf.h +@@ -87,6 +87,10 @@ struct iavf_vsi { + #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) + #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ + ++#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ ++ (IAVF_MAX_VF_VSI * \ ++ sizeof(struct virtchnl_vsi_resource))) ++ + /* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +@@ -306,6 +310,14 @@ struct iavf_adapter { + bool netdev_registered; + bool link_up; + enum virtchnl_link_speed link_speed; ++ /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set ++ * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if ++ * this field is valid. This field should be used going forward and the ++ * enum virtchnl_link_speed above should be considered the legacy way of ++ * storing/communicating link speeds. ++ */ ++ u32 link_speed_mbps; ++ + enum virtchnl_ops current_op; + #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_cap_flags & \ +@@ -322,6 +334,8 @@ struct iavf_adapter { + VIRTCHNL_VF_OFFLOAD_RSS_PF))) + #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN) ++#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ ++ VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct virtchnl_version_info pf_version; +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +index dad3eec8ccd8..758bef02a2a8 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +@@ -278,7 +278,18 @@ static int iavf_get_link_ksettings(struct net_device *netdev, + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; +- /* Set speed and duplex */ ++ cmd->base.duplex = DUPLEX_FULL; ++ ++ if (ADV_LINK_SUPPORT(adapter)) { ++ if (adapter->link_speed_mbps && ++ adapter->link_speed_mbps < U32_MAX) ++ cmd->base.speed = adapter->link_speed_mbps; ++ else ++ cmd->base.speed = SPEED_UNKNOWN; ++ ++ return 0; ++ } ++ + switch (adapter->link_speed) { + case IAVF_LINK_SPEED_40GB: + cmd->base.speed = SPEED_40000; +@@ -306,7 +317,6 @@ static int iavf_get_link_ksettings(struct net_device *netdev, + default: + break; + } +- cmd->base.duplex = DUPLEX_FULL; + + return 0; + } +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c +index 8e16be960e96..bacc5fb7eba2 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c +@@ -1756,17 +1756,17 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct iavf_hw *hw = &adapter->hw; +- int err = 0, bufsz; ++ int err; + + WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); + /* aq msg sent, awaiting reply */ + if (!adapter->vf_res) { +- bufsz = sizeof(struct virtchnl_vf_resource) + +- (IAVF_MAX_VF_VSI * +- sizeof(struct virtchnl_vsi_resource)); +- adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); +- if (!adapter->vf_res) ++ adapter->vf_res = 
kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, ++ GFP_KERNEL); ++ if (!adapter->vf_res) { ++ err = -ENOMEM; + goto err; ++ } + } + err = iavf_get_vf_config(adapter); + if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { +@@ -2036,7 +2036,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); + iavf_free_q_vectors(adapter); +- kfree(adapter->vf_res); ++ memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); + iavf_shutdown_adminq(&adapter->hw); + adapter->netdev->flags &= ~IFF_UP; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); +@@ -2487,6 +2487,16 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, + { + int speed = 0, ret = 0; + ++ if (ADV_LINK_SUPPORT(adapter)) { ++ if (adapter->link_speed_mbps < U32_MAX) { ++ speed = adapter->link_speed_mbps; ++ goto validate_bw; ++ } else { ++ dev_err(&adapter->pdev->dev, "Unknown link speed\n"); ++ return -EINVAL; ++ } ++ } ++ + switch (adapter->link_speed) { + case IAVF_LINK_SPEED_40GB: + speed = 40000; +@@ -2510,6 +2520,7 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, + break; + } + ++validate_bw: + if (max_tx_rate > speed) { + dev_err(&adapter->pdev->dev, + "Invalid tx rate specified\n"); +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +index 1ab9cb339acb..9655318803b7 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +@@ -139,7 +139,8 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) + VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | +- VIRTCHNL_VF_OFFLOAD_ADQ; ++ VIRTCHNL_VF_OFFLOAD_ADQ | ++ VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; +@@ -918,6 +919,8 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); + } + ++#define IAVF_MAX_SPEED_STRLEN 13 ++ + /** + * iavf_print_link_message - print link up or down + * @adapter: adapter structure +@@ -927,37 +930,99 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) + static void iavf_print_link_message(struct iavf_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; +- char *speed = "Unknown "; ++ int link_speed_mbps; ++ char *speed; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + ++ speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); ++ if (!speed) ++ return; ++ ++ if (ADV_LINK_SUPPORT(adapter)) { ++ link_speed_mbps = adapter->link_speed_mbps; ++ goto print_link_msg; ++ } ++ + switch (adapter->link_speed) { + case IAVF_LINK_SPEED_40GB: +- speed = "40 G"; ++ link_speed_mbps = SPEED_40000; + break; + case IAVF_LINK_SPEED_25GB: +- speed = "25 G"; ++ link_speed_mbps = SPEED_25000; + break; + case IAVF_LINK_SPEED_20GB: +- speed = "20 G"; ++ link_speed_mbps = SPEED_20000; + break; + case IAVF_LINK_SPEED_10GB: +- speed = "10 G"; ++ link_speed_mbps = SPEED_10000; + break; + case IAVF_LINK_SPEED_1GB: +- speed = "1000 M"; ++ link_speed_mbps = SPEED_1000; + break; + case IAVF_LINK_SPEED_100MB: +- speed = "100 M"; ++ link_speed_mbps = SPEED_100; + break; + default: ++ link_speed_mbps = SPEED_UNKNOWN; + break; + } + +- netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); ++print_link_msg: ++ if (link_speed_mbps > SPEED_1000) { ++ if 
(link_speed_mbps == SPEED_2500) ++ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); ++ else ++ /* convert to Gbps inline */ ++ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", ++ link_speed_mbps / 1000, "Gbps"); ++ } else if (link_speed_mbps == SPEED_UNKNOWN) { ++ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); ++ } else { ++ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", ++ link_speed_mbps, "Mbps"); ++ } ++ ++ netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); ++ kfree(speed); ++} ++ ++/** ++ * iavf_get_vpe_link_status ++ * @adapter: adapter structure ++ * @vpe: virtchnl_pf_event structure ++ * ++ * Helper function for determining the link status ++ **/ ++static bool ++iavf_get_vpe_link_status(struct iavf_adapter *adapter, ++ struct virtchnl_pf_event *vpe) ++{ ++ if (ADV_LINK_SUPPORT(adapter)) ++ return vpe->event_data.link_event_adv.link_status; ++ else ++ return vpe->event_data.link_event.link_status; ++} ++ ++/** ++ * iavf_set_adapter_link_speed_from_vpe ++ * @adapter: adapter structure for which we are setting the link speed ++ * @vpe: virtchnl_pf_event structure that contains the link speed we are setting ++ * ++ * Helper function for setting iavf_adapter link speed ++ **/ ++static void ++iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, ++ struct virtchnl_pf_event *vpe) ++{ ++ if (ADV_LINK_SUPPORT(adapter)) ++ adapter->link_speed_mbps = ++ vpe->event_data.link_event_adv.link_speed; ++ else ++ adapter->link_speed = vpe->event_data.link_event.link_speed; + } + + /** +@@ -1187,12 +1252,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)msg; +- bool link_up = vpe->event_data.link_event.link_status; ++ bool link_up = iavf_get_vpe_link_status(adapter, vpe); + + switch (vpe->event) { + case VIRTCHNL_EVENT_LINK_CHANGE: +- adapter->link_speed = +- vpe->event_data.link_event.link_speed; ++ iavf_set_adapter_link_speed_from_vpe(adapter, vpe); + + /* we've already got the right link status, bail */ + if (adapter->link_up == link_up) +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 373b8c832850..cf5d447af7db 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -5925,8 +5925,8 @@ static int mvpp2_remove(struct platform_device *pdev) + { + struct mvpp2 *priv = platform_get_drvdata(pdev); + struct fwnode_handle *fwnode = pdev->dev.fwnode; ++ int i = 0, poolnum = MVPP2_BM_POOLS_NUM; + struct fwnode_handle *port_fwnode; +- int i = 0; + + mvpp2_dbgfs_cleanup(priv); + +@@ -5940,7 +5940,10 @@ static int mvpp2_remove(struct platform_device *pdev) + + destroy_workqueue(priv->stats_queue); + +- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { ++ if (priv->percpu_pools) ++ poolnum = mvpp2_get_nrxqs(priv) * 2; ++ ++ for (i = 0; i < poolnum; i++) { + struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index 7c77378accf0..f012aac83b10 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -181,7 +181,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, + in, pas)); + + err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, 
inlen); +- kfree(in); ++ kvfree(in); + + if (err) { + mlx5_core_warn(mdev, " Can't create QP\n"); +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c +index aa101f72d405..cac75c7d1d01 100644 +--- a/drivers/net/geneve.c ++++ b/drivers/net/geneve.c +@@ -987,9 +987,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) + if (geneve->collect_md) { + info = skb_tunnel_info(skb); + if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { +- err = -EINVAL; + netdev_dbg(dev, "no tunnel metadata\n"); +- goto tx_error; ++ dev_kfree_skb(skb); ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; + } + } else { + info = &geneve->info; +@@ -1006,7 +1007,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) + + if (likely(!err)) + return NETDEV_TX_OK; +-tx_error: ++ + dev_kfree_skb(skb); + + if (err == -ELOOP) +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c +index 71cdef9fb56b..5ab53e9942f3 100644 +--- a/drivers/net/hamradio/yam.c ++++ b/drivers/net/hamradio/yam.c +@@ -1133,6 +1133,7 @@ static int __init yam_init_driver(void) + err = register_netdev(dev); + if (err) { + printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); ++ free_netdev(dev); + goto error; + } + yam_devs[i] = dev; +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index a7796134e3be..91cf1d167263 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -358,7 +358,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) + return marvell_config_aneg(phydev); + } + +-#ifdef CONFIG_OF_MDIO ++#if IS_ENABLED(CONFIG_OF_MDIO) + /* Set and/or override some configuration registers based on the + * marvell,reg-init property stored in the of_node for the phydev. 
+ * +diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c +index 2581ab724c34..f8f75a504a58 100644 +--- a/drivers/ntb/core.c ++++ b/drivers/ntb/core.c +@@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb) + case NTB_TOPO_B2B_DSD: + return NTB_PORT_SEC_DSD; + default: +- break; ++ return 0; + } +- +- return -EINVAL; + } + EXPORT_SYMBOL(ntb_default_port_number); + +@@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) + case NTB_TOPO_B2B_DSD: + return NTB_PORT_PRI_USD; + default: +- break; ++ return 0; + } +- +- return -EINVAL; + } + EXPORT_SYMBOL(ntb_default_peer_port_number); + +@@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void) + bus_unregister(&ntb_bus); + } + module_exit(ntb_driver_exit); +- +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c +index e9b7c2dfc730..5ce4766a6c9e 100644 +--- a/drivers/ntb/test/ntb_perf.c ++++ b/drivers/ntb/test/ntb_perf.c +@@ -158,6 +158,8 @@ struct perf_peer { + /* NTB connection setup service */ + struct work_struct service; + unsigned long sts; ++ ++ struct completion init_comp; + }; + #define to_peer_service(__work) \ + container_of(__work, struct perf_peer, service) +@@ -546,6 +548,7 @@ static int perf_setup_outbuf(struct perf_peer *peer) + + /* Initialization is finally done */ + set_bit(PERF_STS_DONE, &peer->sts); ++ complete_all(&peer->init_comp); + + return 0; + } +@@ -556,7 +559,7 @@ static void perf_free_inbuf(struct perf_peer *peer) + return; + + (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); +- dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size, ++ dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size, + peer->inbuf, peer->inbuf_xlat); + peer->inbuf = NULL; + } +@@ -585,8 +588,9 @@ static int perf_setup_inbuf(struct perf_peer *peer) + + perf_free_inbuf(peer); + +- peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size, +- &peer->inbuf_xlat, GFP_KERNEL); ++ peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev, ++ peer->inbuf_size, &peer->inbuf_xlat, ++ GFP_KERNEL); + if (!peer->inbuf) { + dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n", + &peer->inbuf_size); +@@ -636,6 +640,7 @@ static void perf_service_work(struct work_struct *work) + perf_setup_outbuf(peer); + + if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) { ++ init_completion(&peer->init_comp); + clear_bit(PERF_STS_DONE, &peer->sts); + if (test_bit(0, &peer->perf->busy_flag) && + peer == peer->perf->test_peer) { +@@ -652,7 +657,7 @@ static int perf_init_service(struct perf_ctx *perf) + { + u64 mask; + +- if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) { ++ if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) { + dev_err(&perf->ntb->dev, "Not enough memory windows\n"); + return -EINVAL; + } +@@ -1051,8 +1056,9 @@ static int perf_submit_test(struct perf_peer *peer) + struct perf_thread *pthr; + int tidx, ret; + +- if (!test_bit(PERF_STS_DONE, &peer->sts)) +- return -ENOLINK; ++ ret = wait_for_completion_interruptible(&peer->init_comp); ++ if (ret < 0) ++ return ret; + + if (test_and_set_bit_lock(0, &perf->busy_flag)) + return -EBUSY; +@@ -1418,10 +1424,21 @@ static int perf_init_peers(struct perf_ctx *perf) + peer->gidx = pidx; + } + INIT_WORK(&peer->service, perf_service_work); ++ init_completion(&peer->init_comp); + } + if (perf->gidx == -1) + perf->gidx = pidx; + ++ /* ++ * Hardware with only two ports may not have unique port ++ * numbers. In this case, the gidxs should all be zero. 
++ */ ++ if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 && ++ ntb_peer_port_number(perf->ntb, 0) == 0) { ++ perf->gidx = 0; ++ perf->peers[0].gidx = 0; ++ } ++ + for (pidx = 0; pidx < perf->pcnt; pidx++) { + ret = perf_setup_peer_mw(&perf->peers[pidx]); + if (ret) +@@ -1517,4 +1534,3 @@ static void __exit perf_exit(void) + destroy_workqueue(perf_wq); + } + module_exit(perf_exit); +- +diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c +index 65865e460ab8..18d00eec7b02 100644 +--- a/drivers/ntb/test/ntb_pingpong.c ++++ b/drivers/ntb/test/ntb_pingpong.c +@@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp) + link = ntb_link_is_up(pp->ntb, NULL, NULL); + + /* Find next available peer */ +- if (link & pp->nmask) { ++ if (link & pp->nmask) + pidx = __ffs64(link & pp->nmask); +- out_db = BIT_ULL(pidx + 1); +- } else if (link & pp->pmask) { ++ else if (link & pp->pmask) + pidx = __ffs64(link & pp->pmask); +- out_db = BIT_ULL(pidx); +- } else { ++ else + return -ENODEV; +- } ++ ++ out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx)); + + spin_lock(&pp->lock); + pp->out_pidx = pidx; +@@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp) + break; + } + +- pp->in_db = BIT_ULL(pidx); ++ pp->in_db = BIT_ULL(lport); + pp->pmask = GENMASK_ULL(pidx, 0) >> 1; + pp->nmask = GENMASK_ULL(pcnt - 1, pidx); + +@@ -435,4 +434,3 @@ static void __exit pp_exit(void) + debugfs_remove_recursive(pp_dbgfs_topdir); + } + module_exit(pp_exit); +- +diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c +index d592c0ffbd19..311d6ab8d016 100644 +--- a/drivers/ntb/test/ntb_tool.c ++++ b/drivers/ntb/test/ntb_tool.c +@@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf, + buf[1] = '\n'; + buf[2] = '\0'; + +- return simple_read_from_buffer(ubuf, size, offp, buf, 3); ++ return simple_read_from_buffer(ubuf, size, offp, buf, 2); + } + + static TOOL_FOPS_RDWR(tool_peer_link_fops, +@@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, + inmw->size = min_t(resource_size_t, req_size, size); + inmw->size = round_up(inmw->size, addr_align); + inmw->size = round_up(inmw->size, size_align); +- inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size, ++ inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size, + &inmw->dma_base, GFP_KERNEL); + if (!inmw->mm_base) + return -ENOMEM; +@@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, + return 0; + + err_free_dma: +- dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base, ++ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base, + inmw->dma_base); + inmw->mm_base = NULL; + inmw->dma_base = 0; +@@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx) + + if (inmw->mm_base != NULL) { + ntb_mw_clear_trans(tc->ntb, pidx, widx); +- dma_free_coherent(&tc->ntb->dev, inmw->size, ++ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, + inmw->mm_base, inmw->dma_base); + } + +@@ -1690,4 +1690,3 @@ static void __exit tool_exit(void) + debugfs_remove_recursive(tool_dbgfs_topdir); + } + module_exit(tool_exit); +- +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 1c2129493508..a13cae190196 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2971,9 +2971,15 @@ static int nvme_suspend(struct device *dev) + * the PCI bus layer to put it into D3 in order to take the PCIe link + * down, so as to allow the platform to achieve its 
minimum low-power + * state (which may not be possible if the link is up). ++ * ++ * If a host memory buffer is enabled, shut down the device as the NVMe ++ * specification allows the device to access the host memory buffer in ++ * host DRAM from all power states, but hosts will fail access to DRAM ++ * during S3. + */ + if (pm_suspend_via_firmware() || !ctrl->npss || + !pcie_aspm_enabled(pdev) || ++ ndev->nr_host_mem_descs || + (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) + return nvme_disable_prepare_reset(ndev, true); + +diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c +index c72eef988041..a32e60b024b8 100644 +--- a/drivers/of/kobj.c ++++ b/drivers/of/kobj.c +@@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np) + if (!name) + return -ENOMEM; + +- of_node_get(np); +- + rc = kobject_add(&np->kobj, parent, "%s", name); + kfree(name); + if (rc) +@@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np) + for_each_property_of_node(np, pp) + __of_add_property_sysfs(np, pp); + ++ of_node_get(np); + return 0; + } + +diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c +index b927a92e3463..8c9f88704874 100644 +--- a/drivers/pci/controller/dwc/pci-meson.c ++++ b/drivers/pci/controller/dwc/pci-meson.c +@@ -301,11 +301,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp) + meson_cfg_writel(mp, val, PCIE_CFG0); + + val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); +- val &= ~LINK_CAPABLE_MASK; ++ val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE); + meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); + + val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); +- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE; ++ val |= LINK_CAPABLE_X1; + meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); + + val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); +diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c +index 8615f1548882..fbcb211cceb4 100644 +--- a/drivers/pci/controller/dwc/pcie-designware-host.c ++++ b/drivers/pci/controller/dwc/pcie-designware-host.c +@@ -263,6 +263,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) + return -ENOMEM; + } + ++ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); ++ + pp->msi_domain = pci_msi_create_irq_domain(fwnode, + &dw_pcie_msi_domain_info, + pp->irq_domain); +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c +index 97245e076548..f2481e80e272 100644 +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -344,10 +344,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) + + advk_pcie_wait_for_link(pcie); + +- reg = PCIE_CORE_LINK_L0S_ENTRY | +- (1 << PCIE_CORE_LINK_WIDTH_SHIFT); +- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); +- + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | +diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c +index d219404bad92..9a86bb7448ac 100644 +--- a/drivers/pci/controller/pci-v3-semi.c ++++ b/drivers/pci/controller/pci-v3-semi.c +@@ -743,7 +743,7 @@ static int v3_pci_probe(struct platform_device *pdev) + int ret; + LIST_HEAD(res); + +- host = pci_alloc_host_bridge(sizeof(*v3)); ++ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3)); + if (!host) + return -ENOMEM; + +diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c +index 1ad0b56f11b4..04114352d0e7 100644 +--- 
a/drivers/pci/controller/pcie-rcar.c ++++ b/drivers/pci/controller/pcie-rcar.c +@@ -335,11 +335,12 @@ static struct pci_ops rcar_pcie_ops = { + }; + + static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, +- struct resource *res) ++ struct resource_entry *window) + { + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + resource_size_t res_start; ++ struct resource *res = window->res; + u32 mask; + + rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); +@@ -353,9 +354,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, + rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + if (res->flags & IORESOURCE_IO) +- res_start = pci_pio_to_address(res->start); ++ res_start = pci_pio_to_address(res->start) - window->offset; + else +- res_start = res->start; ++ res_start = res->start - window->offset; + + rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); + rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, +@@ -384,7 +385,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: +- rcar_pcie_setup_window(i, pci, res); ++ rcar_pcie_setup_window(i, pci, win); + i++; + break; + case IORESOURCE_BUS: +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c +index afc1a3d240b5..87348ecfe3fc 100644 +--- a/drivers/pci/controller/vmd.c ++++ b/drivers/pci/controller/vmd.c +@@ -593,9 +593,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) + if (!membar2) + return -ENOMEM; + offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - +- readq(membar2 + MB2_SHADOW_OFFSET); ++ (readq(membar2 + MB2_SHADOW_OFFSET) & ++ PCI_BASE_ADDRESS_MEM_MASK); + offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - +- readq(membar2 + MB2_SHADOW_OFFSET + 8); ++ (readq(membar2 + MB2_SHADOW_OFFSET + 8) & ++ PCI_BASE_ADDRESS_MEM_MASK); + pci_iounmap(vmd->dev, membar2); + } + } +diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c +index 5fd90105510d..d3b6b9a05618 100644 +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -195,8 +195,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { + * RO, the rest is reserved + */ + .w1c = GENMASK(19, 16), +- .ro = GENMASK(20, 19), +- .rsvd = GENMASK(31, 21), ++ .ro = GENMASK(21, 20), ++ .rsvd = GENMASK(31, 22), + }, + + [PCI_EXP_LNKCAP / 4] = { +@@ -236,7 +236,7 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { + PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16, + .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS | + PCI_EXP_SLTSTA_EIS) << 16, +- .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16), ++ .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16), + }, + + [PCI_EXP_RTCTL / 4] = { +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index c73e8095a849..689f0280c038 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -4608,7 +4608,8 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) + * pcie_wait_for_link_delay - Wait until link is active or inactive + * @pdev: Bridge device + * @active: waiting for active or inactive? +- * @delay: Delay to wait after link has become active (in ms) ++ * @delay: Delay to wait after link has become active (in ms). Specify %0 ++ * for no delay. + * + * Use this to wait till link becomes active or inactive. 
+ */ +@@ -4649,7 +4650,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, + msleep(10); + timeout -= 10; + } +- if (active && ret) ++ if (active && ret && delay) + msleep(delay); + else if (ret != active) + pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", +@@ -4770,17 +4771,28 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) + if (!pcie_downstream_port(dev)) + return; + +- if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { +- pci_dbg(dev, "waiting %d ms for downstream link\n", delay); +- msleep(delay); +- } else { +- pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", +- delay); +- if (!pcie_wait_for_link_delay(dev, true, delay)) { ++ /* ++ * Per PCIe r5.0, sec 6.6.1, for downstream ports that support ++ * speeds > 5 GT/s, we must wait for link training to complete ++ * before the mandatory delay. ++ * ++ * We can only tell when link training completes via DLL Link ++ * Active, which is required for downstream ports that support ++ * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common ++ * devices do not implement Link Active reporting even when it's ++ * required, so we'll check for that directly instead of checking ++ * the supported link speed. We assume devices without Link Active ++ * reporting can train in 100 ms regardless of speed. ++ */ ++ if (dev->link_active_reporting) { ++ pci_dbg(dev, "waiting for link to train\n"); ++ if (!pcie_wait_for_link_delay(dev, true, 0)) { + /* Did not train, no need to wait any further */ + return; + } + } ++ pci_dbg(child, "waiting %d ms to become accessible\n", delay); ++ msleep(delay); + + if (!pci_device_is_present(child)) { + pci_dbg(child, "waiting additional %d ms to become accessible\n", delay); +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 5a1bbf2cb7e9..4a0ec34062d6 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -628,16 +628,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) + + /* Setup initial capable state. Will be updated later */ + link->aspm_capable = link->aspm_support; +- /* +- * If the downstream component has pci bridge function, don't +- * do ASPM for now. +- */ +- list_for_each_entry(child, &linkbus->devices, bus_list) { +- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { +- link->aspm_disable = ASPM_STATE_ALL; +- break; +- } +- } + + /* Get and check endpoint acceptable latencies */ + list_for_each_entry(child, &linkbus->devices, bus_list) { +diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c +index 9361f3aa26ab..357a454cafa0 100644 +--- a/drivers/pci/pcie/ptm.c ++++ b/drivers/pci/pcie/ptm.c +@@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev) + if (!pci_is_pcie(dev)) + return; + +- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); +- if (!pos) +- return; +- + /* + * Enable PTM only on interior devices (root ports, switch ports, + * etc.) on the assumption that it causes no link traffic until an +@@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev) + pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) + return; + ++ /* ++ * Switch Downstream Ports are not permitted to have a PTM ++ * capability; their PTM behavior is controlled by the Upstream ++ * Port (PCIe r5.0, sec 7.9.16). 
++ */ ++ ups = pci_upstream_bridge(dev); ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && ++ ups && ups->ptm_enabled) { ++ dev->ptm_granularity = ups->ptm_granularity; ++ dev->ptm_enabled = 1; ++ return; ++ } ++ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); ++ if (!pos) ++ return; ++ + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); + local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; + +@@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev) + * the spec recommendation (PCIe r3.1, sec 7.32.3), select the + * furthest upstream Time Source as the PTM Root. + */ +- ups = pci_upstream_bridge(dev); + if (ups && ups->ptm_enabled) { + ctrl = PCI_PTM_CTRL_ENABLE; + if (ups->ptm_granularity == 0) +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 83d909abc61d..8fa13486f2f1 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -867,9 +867,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) + goto free; + + err = device_register(&bridge->dev); +- if (err) ++ if (err) { + put_device(&bridge->dev); +- ++ goto free; ++ } + bus->bridge = get_device(&bridge->dev); + device_enable_async_suspend(bus->bridge); + pci_set_bus_of_node(bus); +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index d8ca40a97693..d21fa04fa44d 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) + res->end = res->start + pci_rebar_size_to_bytes(size) - 1; + + /* Check if the new config works by trying to assign everything. */ +- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); +- if (ret) +- goto error_resize; +- ++ if (dev->bus->self) { ++ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); ++ if (ret) ++ goto error_resize; ++ } + return 0; + + error_resize: +diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +index 078b8dc57250..c5b0950c2a7a 100644 +--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c ++++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +@@ -35,7 +35,7 @@ + /* L3C has 8-counters */ + #define L3C_NR_COUNTERS 0x8 + +-#define L3C_PERF_CTRL_EN 0x20000 ++#define L3C_PERF_CTRL_EN 0x10000 + #define L3C_EVTYPE_NONE 0xff + + /* +diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c +index fe6c58910e4c..7c7862b4f41f 100644 +--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c ++++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c +@@ -16,8 +16,6 @@ enum bcm_usb_phy_version { + }; + + enum bcm_usb_phy_reg { +- PLL_NDIV_FRAC, +- PLL_NDIV_INT, + PLL_CTRL, + PHY_CTRL, + PHY_PLL_CTRL, +@@ -31,18 +29,11 @@ static const u8 bcm_usb_combo_phy_ss[] = { + }; + + static const u8 bcm_usb_combo_phy_hs[] = { +- [PLL_NDIV_FRAC] = 0x04, +- [PLL_NDIV_INT] = 0x08, + [PLL_CTRL] = 0x0c, + [PHY_CTRL] = 0x10, + }; + +-#define HSPLL_NDIV_INT_VAL 0x13 +-#define HSPLL_NDIV_FRAC_VAL 0x1005 +- + static const u8 bcm_usb_hs_phy[] = { +- [PLL_NDIV_FRAC] = 0x0, +- [PLL_NDIV_INT] = 0x4, + [PLL_CTRL] = 0x8, + [PHY_CTRL] = 0xc, + }; +@@ -52,7 +43,6 @@ enum pll_ctrl_bits { + SSPLL_SUSPEND_EN, + PLL_SEQ_START, + PLL_LOCK, +- PLL_PDIV, + }; + + static const u8 u3pll_ctrl[] = { +@@ -66,29 +56,17 @@ static const u8 u3pll_ctrl[] = { + #define HSPLL_PDIV_VAL 0x1 + + static const u8 u2pll_ctrl[] = { +- [PLL_PDIV] = 1, + [PLL_RESETB] = 5, + [PLL_LOCK] = 6, + }; + + enum bcm_usb_phy_ctrl_bits { + CORERDY, +- AFE_LDO_PWRDWNB, +- AFE_PLL_PWRDWNB, +- AFE_BG_PWRDWNB, +- PHY_ISO, + 
PHY_RESETB, + PHY_PCTL, + }; + + #define PHY_PCTL_MASK 0xffff +-/* +- * 0x0806 of PCTL_VAL has below bits set +- * BIT-8 : refclk divider 1 +- * BIT-3:2: device mode; mode is not effect +- * BIT-1: soft reset active low +- */ +-#define HSPHY_PCTL_VAL 0x0806 + #define SSPHY_PCTL_VAL 0x0006 + + static const u8 u3phy_ctrl[] = { +@@ -98,10 +76,6 @@ static const u8 u3phy_ctrl[] = { + + static const u8 u2phy_ctrl[] = { + [CORERDY] = 0, +- [AFE_LDO_PWRDWNB] = 1, +- [AFE_PLL_PWRDWNB] = 2, +- [AFE_BG_PWRDWNB] = 3, +- [PHY_ISO] = 4, + [PHY_RESETB] = 5, + [PHY_PCTL] = 6, + }; +@@ -186,38 +160,13 @@ static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg) + int ret = 0; + void __iomem *regs = phy_cfg->regs; + const u8 *offset; +- u32 rd_data; + + offset = phy_cfg->offset; + +- writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]); +- writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]); +- +- rd_data = readl(regs + offset[PLL_CTRL]); +- rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]); +- rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]); +- writel(rd_data, regs + offset[PLL_CTRL]); +- +- /* Set Core Ready high */ +- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], +- BIT(u2phy_ctrl[CORERDY])); +- +- /* Maximum timeout for Core Ready done */ +- msleep(30); +- ++ bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL], ++ BIT(u2pll_ctrl[PLL_RESETB])); + bcm_usb_reg32_setbits(regs + offset[PLL_CTRL], + BIT(u2pll_ctrl[PLL_RESETB])); +- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], +- BIT(u2phy_ctrl[PHY_RESETB])); +- +- +- rd_data = readl(regs + offset[PHY_CTRL]); +- rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]); +- rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]); +- writel(rd_data, regs + offset[PHY_CTRL]); +- +- /* Maximum timeout for PLL reset done */ +- msleep(30); + + ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL], + BIT(u2pll_ctrl[PLL_LOCK])); +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c +index 9f42036c5fbb..1f81569c7ae3 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c +@@ -774,16 +774,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, + return 0; + } + +-/* +- * imx_free_resources() - free memory used by this driver +- * @info: info driver instance +- */ +-static void imx_free_resources(struct imx_pinctrl *ipctl) +-{ +- if (ipctl->pctl) +- pinctrl_unregister(ipctl->pctl); +-} +- + int imx_pinctrl_probe(struct platform_device *pdev, + const struct imx_pinctrl_soc_info *info) + { +@@ -874,23 +864,18 @@ int imx_pinctrl_probe(struct platform_device *pdev, + &ipctl->pctl); + if (ret) { + dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); +- goto free; ++ return ret; + } + + ret = imx_pinctrl_probe_dt(pdev, ipctl); + if (ret) { + dev_err(&pdev->dev, "fail to probe dt properties\n"); +- goto free; ++ return ret; + } + + dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); + + return pinctrl_enable(ipctl->pctl); +- +-free: +- imx_free_resources(ipctl); +- +- return ret; + } + + static int __maybe_unused imx_pinctrl_suspend(struct device *dev) +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index 7e29e3fecdb2..5bb183c0ce31 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + 
if (ret) { +- pinctrl_unregister(ipctl->pctl); + dev_err(&pdev->dev, "Failed to populate subdevices\n"); + return ret; + } +diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c +index fb76fb2e9ea5..0a951a75c82b 100644 +--- a/drivers/pinctrl/pinctrl-ocelot.c ++++ b/drivers/pinctrl/pinctrl-ocelot.c +@@ -711,11 +711,12 @@ static void ocelot_irq_handler(struct irq_desc *desc) + struct irq_chip *parent_chip = irq_desc_get_chip(desc); + struct gpio_chip *chip = irq_desc_get_handler_data(desc); + struct ocelot_pinctrl *info = gpiochip_get_data(chip); ++ unsigned int id_reg = OCELOT_GPIO_INTR_IDENT * info->stride; + unsigned int reg = 0, irq, i; + unsigned long irqs; + + for (i = 0; i < info->stride; i++) { +- regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, ®); ++ regmap_read(info->map, id_reg + 4 * i, ®); + if (!reg) + continue; + +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c +index dc0bbf198cbc..1bd8840e11a6 100644 +--- a/drivers/pinctrl/pinctrl-rockchip.c ++++ b/drivers/pinctrl/pinctrl-rockchip.c +@@ -506,8 +506,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, + } + + map_num += grp->npins; +- new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), +- GFP_KERNEL); ++ ++ new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL); + if (!new_map) + return -ENOMEM; + +@@ -517,7 +517,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, + /* create mux map */ + parent = of_get_parent(np); + if (!parent) { +- devm_kfree(pctldev->dev, new_map); ++ kfree(new_map); + return -EINVAL; + } + new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; +@@ -544,6 +544,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, + static void rockchip_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps) + { ++ kfree(map); + } + + static const struct pinctrl_ops rockchip_pctrl_ops = { +diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c +index 017fc6b3e27e..ca9da61cfc4e 100644 +--- a/drivers/pinctrl/pinctrl-rza1.c ++++ b/drivers/pinctrl/pinctrl-rza1.c +@@ -418,7 +418,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = { + }; + + static const struct rza1_swio_entry rza1l_swio_entries[] = { +- [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins }, ++ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins }, + }; + + /* RZ/A1L (r7s72102x) pinmux flags table */ +diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig +index c84a7b1caeb6..d6fdc10c29f0 100644 +--- a/drivers/power/supply/Kconfig ++++ b/drivers/power/supply/Kconfig +@@ -577,7 +577,7 @@ config CHARGER_BQ24257 + tristate "TI BQ24250/24251/24257 battery charger driver" + depends on I2C + depends on GPIOLIB || COMPILE_TEST +- depends on REGMAP_I2C ++ select REGMAP_I2C + help + Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery + chargers. +diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c +index 84a206f42a8e..e7931ffb7151 100644 +--- a/drivers/power/supply/lp8788-charger.c ++++ b/drivers/power/supply/lp8788-charger.c +@@ -572,27 +572,14 @@ static void lp8788_setup_adc_channel(struct device *dev, + return; + + /* ADC channel for battery voltage */ +- chan = iio_channel_get(dev, pdata->adc_vbatt); ++ chan = devm_iio_channel_get(dev, pdata->adc_vbatt); + pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? 
NULL : chan; + + /* ADC channel for battery temperature */ +- chan = iio_channel_get(dev, pdata->adc_batt_temp); ++ chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); + pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan; + } + +-static void lp8788_release_adc_channel(struct lp8788_charger *pchg) +-{ +- int i; +- +- for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { +- if (!pchg->chan[i]) +- continue; +- +- iio_channel_release(pchg->chan[i]); +- pchg->chan[i] = NULL; +- } +-} +- + static ssize_t lp8788_show_charger_status(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -735,7 +722,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) + flush_work(&pchg->charger_work); + lp8788_irq_unregister(pdev, pchg); + lp8788_psy_unregister(pchg); +- lp8788_release_adc_channel(pchg); + + return 0; + } +diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c +index c1d124b8be0c..d102921b3ab2 100644 +--- a/drivers/power/supply/smb347-charger.c ++++ b/drivers/power/supply/smb347-charger.c +@@ -1138,6 +1138,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) + switch (reg) { + case IRQSTAT_A: + case IRQSTAT_C: ++ case IRQSTAT_D: + case IRQSTAT_E: + case IRQSTAT_F: + case STAT_A: +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c +index c9e57bd109fb..599a0f66a384 100644 +--- a/drivers/pwm/pwm-img.c ++++ b/drivers/pwm/pwm-img.c +@@ -129,8 +129,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); + + ret = pm_runtime_get_sync(chip->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_autosuspend(chip->dev); + return ret; ++ } + + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); +@@ -331,8 +333,10 @@ static int img_pwm_remove(struct platform_device *pdev) + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(&pdev->dev); + return ret; ++ } + + for (i = 0; i < pwm_chip->chip.npwm; i++) { + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); +diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c +index 9d78cc21cb12..d0f5c69930d0 100644 +--- a/drivers/pwm/pwm-jz4740.c ++++ b/drivers/pwm/pwm-jz4740.c +@@ -108,8 +108,8 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + if (prescaler == 6) + return -EINVAL; + +- tmp = (unsigned long long)period * state->duty_cycle; +- do_div(tmp, state->period); ++ tmp = (unsigned long long)rate * state->duty_cycle; ++ do_div(tmp, NSEC_PER_SEC); + duty = period - tmp; + + if (duty >= period) +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c +index 6ba065d5c4d9..d84e9f306086 100644 +--- a/drivers/remoteproc/qcom_q6v5_mss.c ++++ b/drivers/remoteproc/qcom_q6v5_mss.c +@@ -1005,7 +1005,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc) + goto release_firmware; + } + +- ptr = qproc->mpss_region + offset; ++ ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); ++ if (!ptr) { ++ dev_err(qproc->dev, ++ "unable to map memory region: %pa+%zx-%x\n", ++ &qproc->mpss_phys, offset, phdr->p_memsz); ++ goto release_firmware; ++ } + + if (phdr->p_filesz && phdr->p_offset < fw->size) { + /* Firmware is large enough to be non-split */ +@@ -1014,6 +1020,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; ++ iounmap(ptr); + goto 
release_firmware; + } + +@@ -1024,6 +1031,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) + ret = request_firmware(&seg_fw, fw_name, qproc->dev); + if (ret) { + dev_err(qproc->dev, "failed to load %s\n", fw_name); ++ iounmap(ptr); + goto release_firmware; + } + +@@ -1036,6 +1044,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) + memset(ptr + phdr->p_filesz, 0, + phdr->p_memsz - phdr->p_filesz); + } ++ iounmap(ptr); + size += phdr->p_memsz; + } + +@@ -1075,7 +1084,8 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, + int ret = 0; + struct q6v5 *qproc = rproc->priv; + unsigned long mask = BIT((unsigned long)segment->priv); +- void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); ++ int offset = segment->da - qproc->mpss_reloc; ++ void *ptr = NULL; + + /* Unlock mba before copying segments */ + if (!qproc->dump_mba_loaded) { +@@ -1089,10 +1099,15 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, + } + } + +- if (!ptr || ret) +- memset(dest, 0xff, segment->size); +- else ++ if (!ret) ++ ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); ++ ++ if (ptr) { + memcpy(dest, ptr, segment->size); ++ iounmap(ptr); ++ } else { ++ memset(dest, 0xff, segment->size); ++ } + + qproc->dump_segment_mask |= mask; + +@@ -1393,12 +1408,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) + + qproc->mpss_phys = qproc->mpss_reloc = r.start; + qproc->mpss_size = resource_size(&r); +- qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); +- if (!qproc->mpss_region) { +- dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", +- &r.start, qproc->mpss_size); +- return -EBUSY; +- } + + return 0; + } +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index 0896b3614eb1..ce92ae227aa1 100644 +--- a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -2036,6 +2036,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, + rproc->dev.type = &rproc_type; + rproc->dev.class = &rproc_class; + rproc->dev.driver_data = rproc; ++ idr_init(&rproc->notifyids); + + /* Assign a unique device index and name */ + rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); +@@ -2060,8 +2061,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, + + mutex_init(&rproc->lock); + +- idr_init(&rproc->notifyids); +- + INIT_LIST_HEAD(&rproc->carveouts); + INIT_LIST_HEAD(&rproc->mappings); + INIT_LIST_HEAD(&rproc->traces); +diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c +index afce2c0b4bd6..d6802e6191cb 100644 +--- a/drivers/rtc/rtc-mc13xxx.c ++++ b/drivers/rtc/rtc-mc13xxx.c +@@ -308,8 +308,10 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev) + mc13xxx_unlock(mc13xxx); + + ret = rtc_register_device(priv->rtc); +- if (ret) ++ if (ret) { ++ mc13xxx_lock(mc13xxx); + goto err_irq_request; ++ } + + return 0; + +diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c +index 2b316661a578..bbdfebd70644 100644 +--- a/drivers/rtc/rtc-rv3028.c ++++ b/drivers/rtc/rtc-rv3028.c +@@ -625,6 +625,8 @@ static int rv3028_probe(struct i2c_client *client) + return -ENOMEM; + + rv3028->regmap = devm_regmap_init_i2c(client, ®map_config); ++ if (IS_ERR(rv3028->regmap)) ++ return PTR_ERR(rv3028->regmap); + + i2c_set_clientdata(client, rv3028); + +diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h +index a58b45df95d7..3b0a4483a252 100644 +--- a/drivers/s390/cio/qdio.h ++++ b/drivers/s390/cio/qdio.h +@@ -372,7 +372,6 @@ 
static inline int multicast_outbound(struct qdio_q *q) + extern u64 last_ai_time; + + /* prototypes for thin interrupt */ +-void qdio_setup_thinint(struct qdio_irq *irq_ptr); + int qdio_establish_thinint(struct qdio_irq *irq_ptr); + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); + void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c +index ee0b3c586211..9dc56aa3ae55 100644 +--- a/drivers/s390/cio/qdio_setup.c ++++ b/drivers/s390/cio/qdio_setup.c +@@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); +- qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, +diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c +index 93ee067c10ca..ddf780b12d40 100644 +--- a/drivers/s390/cio/qdio_thinint.c ++++ b/drivers/s390/cio/qdio_thinint.c +@@ -268,17 +268,19 @@ int __init tiqdio_register_thinints(void) + + int qdio_establish_thinint(struct qdio_irq *irq_ptr) + { ++ int rc; ++ + if (!is_thinint_irq(irq_ptr)) + return 0; +- return set_subchannel_ind(irq_ptr, 0); +-} + +-void qdio_setup_thinint(struct qdio_irq *irq_ptr) +-{ +- if (!is_thinint_irq(irq_ptr)) +- return; + irq_ptr->dsci = get_indicator(); + DBF_HEX(&irq_ptr->dsci, sizeof(void *)); ++ ++ rc = set_subchannel_ind(irq_ptr, 0); ++ if (rc) ++ put_indicator(irq_ptr->dsci); ++ ++ return rc; + } + + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c +index d12dd89538df..deab66598910 100644 +--- a/drivers/scsi/arm/acornscsi.c ++++ b/drivers/scsi/arm/acornscsi.c +@@ -2911,8 +2911,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) + + ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); +- if (!ashost->base || !ashost->fast) ++ if (!ashost->base || !ashost->fast) { ++ ret = -ENOMEM; + goto out_put; ++ } + + host->irq = ec->irq; + ashost->host = host; +diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +index 524cdbcd29aa..ec7d01f6e2d5 100644 +--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c ++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk) + struct net_device *ndev = cdev->ports[csk->port_id]; + struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; + struct sk_buff *skb = NULL; ++ int ret; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); +@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk) + csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); + if (csk->atid < 0) { + pr_err("NO atid available.\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto put_sock; + } + cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_get(csk); + + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); + if (!skb) { +- cxgb3_free_atid(t3dev, csk->atid); +- cxgbi_sock_put(csk); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto free_atid; + } + skb->sk = (struct sock *)csk; + set_arp_failure_handler(skb, act_open_arp_failure); +@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk) + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); + send_act_open_req(csk, skb, csk->l2t); + return 0; ++ ++free_atid: ++ cxgb3_free_atid(t3dev, 
csk->atid); ++put_sock: ++ cxgbi_sock_put(csk); ++ l2t_release(t3dev, csk->l2t); ++ csk->l2t = NULL; ++ ++ return ret; + } + + cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c +index 6f4692f0d714..031aa4043c5e 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c +@@ -904,8 +904,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct device *dev = hisi_hba->dev; + ++ dev_dbg(dev, "phy%d OOB ready\n", phy_no); ++ if (phy->phy_attached) ++ return; ++ + if (!timer_pending(&phy->timer)) { +- dev_dbg(dev, "phy%d OOB ready\n", phy_no); + phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; + add_timer(&phy->timer); + } +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index 59f0f1030c54..c5711c659b51 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, + int rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + ++ set_adapter_info(hostdata); ++ + /* Re-enable the CRQ */ + do { + if (rc) +diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c +index e4857b728033..a64abe38db2d 100644 +--- a/drivers/scsi/iscsi_boot_sysfs.c ++++ b/drivers/scsi/iscsi_boot_sysfs.c +@@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, + boot_kobj->kobj.kset = boot_kset->kset; + if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, + NULL, name, index)) { +- kfree(boot_kobj); ++ kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->data = data; +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 66f8867dd837..94d8f2834100 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -8394,6 +8394,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + spin_lock_irq(shost->host_lock); + if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { + spin_unlock_irq(shost->host_lock); ++ if (newnode) ++ lpfc_nlp_put(ndlp); + goto dropit; + } + spin_unlock_irq(shost->host_lock); +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 752b71cfbe12..7fd1d731555f 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -4777,7 +4777,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) + } + + kfree(ioc->hpr_lookup); ++ ioc->hpr_lookup = NULL; + kfree(ioc->internal_lookup); ++ ioc->internal_lookup = NULL; + if (ioc->chain_lookup) { + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; +diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h +index f3f399fe10c8..0da4e16fb23a 100644 +--- a/drivers/scsi/qedf/qedf.h ++++ b/drivers/scsi/qedf/qedf.h +@@ -355,6 +355,7 @@ struct qedf_ctx { + #define QEDF_GRCDUMP_CAPTURE 4 + #define QEDF_IN_RECOVERY 5 + #define QEDF_DBG_STOP_IO 6 ++#define QEDF_PROBING 8 + unsigned long flags; /* Miscellaneous state flags */ + int fipvlan_retries; + u8 num_queues; +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c +index 59ca98f12afd..3d0e345947c1 100644 +--- a/drivers/scsi/qedf/qedf_main.c ++++ b/drivers/scsi/qedf/qedf_main.c +@@ -3153,7 +3153,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) + { + int rc = -EINVAL; + struct fc_lport 
*lport; +- struct qedf_ctx *qedf; ++ struct qedf_ctx *qedf = NULL; + struct Scsi_Host *host; + bool is_vf = false; + struct qed_ll2_params params; +@@ -3183,6 +3183,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) + + /* Initialize qedf_ctx */ + qedf = lport_priv(lport); ++ set_bit(QEDF_PROBING, &qedf->flags); + qedf->lport = lport; + qedf->ctlr.lp = lport; + qedf->pdev = pdev; +@@ -3206,9 +3207,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) + } else { + /* Init pointers during recovery */ + qedf = pci_get_drvdata(pdev); ++ set_bit(QEDF_PROBING, &qedf->flags); + lport = qedf->lport; + } + ++ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); ++ + host = lport->host; + + /* Allocate mempool for qedf_io_work structs */ +@@ -3513,6 +3517,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) + else + fc_fabric_login(lport); + ++ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); ++ ++ clear_bit(QEDF_PROBING, &qedf->flags); ++ + /* All good */ + return 0; + +@@ -3538,6 +3546,11 @@ err2: + err1: + scsi_host_put(lport->host); + err0: ++ if (qedf) { ++ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); ++ ++ clear_bit(QEDF_PROBING, &qedf->flags); ++ } + return rc; + } + +@@ -3687,11 +3700,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data) + { + struct qedf_ctx *qedf = dev; + struct qed_mfw_tlv_fcoe *fcoe = data; +- struct fc_lport *lport = qedf->lport; +- struct Scsi_Host *host = lport->host; +- struct fc_host_attrs *fc_host = shost_to_fc_host(host); ++ struct fc_lport *lport; ++ struct Scsi_Host *host; ++ struct fc_host_attrs *fc_host; + struct fc_host_statistics *hst; + ++ if (!qedf) { ++ QEDF_ERR(NULL, "qedf is null.\n"); ++ return; ++ } ++ ++ if (test_bit(QEDF_PROBING, &qedf->flags)) { ++ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); ++ return; ++ } ++ ++ lport = qedf->lport; ++ host = lport->host; ++ fc_host = shost_to_fc_host(host); ++ + /* Force a refresh of the fc_host stats including offload stats */ + hst = qedf_fc_get_host_stats(host); + +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c +index 8829880a54c3..0f57c8073406 100644 +--- a/drivers/scsi/qedi/qedi_iscsi.c ++++ b/drivers/scsi/qedi/qedi_iscsi.c +@@ -997,7 +997,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + goto ep_exit_recover; + +- flush_work(&qedi_ep->offload_work); ++ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) ++ flush_work(&qedi_ep->offload_work); + + if (qedi_ep->conn) { + qedi_conn = qedi_ep->conn; +@@ -1214,6 +1215,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) + } + + iscsi_cid = (u32)path_data->handle; ++ if (iscsi_cid >= qedi->max_active_conns) { ++ ret = -EINVAL; ++ goto set_path_exit; ++ } + qedi_ep = qedi->ep_tbl[iscsi_cid]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 03d272a09e26..d7ec4083a091 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -6295,6 +6295,7 @@ qla2x00_do_dpc(void *data) + + if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags))) { ++ base_vha->flags.online = 1; + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); + if (ha->isp_ops->abort_isp(base_vha)) { +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index abe7f79bb789..744cd93189da 
100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -926,6 +926,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +@@ -1088,6 +1089,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 91c007d26c1e..206c9f53e9e7 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -551,7 +551,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) + } + } + +-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) ++static void scsi_free_sgtables(struct scsi_cmnd *cmd) + { + if (cmd->sdb.table.nents) + sg_free_table_chained(&cmd->sdb.table, +@@ -563,7 +563,7 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) + + static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) + { +- scsi_mq_free_sgtables(cmd); ++ scsi_free_sgtables(cmd); + scsi_uninit_cmd(cmd); + scsi_del_cmd_from_list(cmd); + } +@@ -1063,7 +1063,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) + + return BLK_STS_OK; + out_free_sgtables: +- scsi_mq_free_sgtables(cmd); ++ scsi_free_sgtables(cmd); + return ret; + } + EXPORT_SYMBOL(scsi_init_io); +@@ -1214,6 +1214,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, + struct request *req) + { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); ++ blk_status_t ret; + + if (!blk_rq_bytes(req)) + cmd->sc_data_direction = DMA_NONE; +@@ -1223,9 +1224,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, + cmd->sc_data_direction = DMA_FROM_DEVICE; + + if (blk_rq_is_scsi(req)) +- return scsi_setup_scsi_cmnd(sdev, req); ++ ret = scsi_setup_scsi_cmnd(sdev, req); + else +- return scsi_setup_fs_cmnd(sdev, req); ++ ret = scsi_setup_fs_cmnd(sdev, req); ++ ++ if (ret != BLK_STS_OK) ++ scsi_free_sgtables(cmd); ++ ++ return ret; + } + + static blk_status_t +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 4664fdf75c0f..70a28f6fb1d0 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -750,7 +750,7 @@ static int sr_probe(struct device *dev) + cd->cdi.disk = disk; + + if (register_cdrom(&cd->cdi)) +- goto fail_put; ++ goto fail_minor; + + /* + * Initialize block layer runtime PM stuffs before the +@@ -768,6 +768,10 @@ static int sr_probe(struct device *dev) + + return 0; + ++fail_minor: ++ spin_lock(&sr_index_lock); ++ clear_bit(minor, sr_index_bits); ++ spin_unlock(&sr_index_lock); + fail_put: + put_disk(disk); + fail_free: +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c +index 411ef60b2c14..c49e9f6c46f8 100644 +--- a/drivers/scsi/ufs/ufs-qcom.c ++++ b/drivers/scsi/ufs/ufs-qcom.c +@@ -1546,11 +1546,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) + + /* sleep a bit intermittently as we are dumping too much data */ + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); +- usleep_range(1000, 1100); ++ udelay(1000); + ufs_qcom_testbus_read(hba); +- usleep_range(1000, 1100); ++ udelay(1000); + ufs_qcom_print_unipro_testbus(hba); +- usleep_range(1000, 1100); ++ udelay(1000); + } + + /** +diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c +index d2197a31abe5..bad366e49159 100644 +--- a/drivers/scsi/ufs/ufs_bsg.c ++++ 
b/drivers/scsi/ufs/ufs_bsg.c +@@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job) + desc_op = bsg_request->upiu_req.qr.opcode; + ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, + &desc_len, desc_op); +- if (ret) ++ if (ret) { ++ pm_runtime_put_sync(hba->dev); + goto out; ++ } + + /* fall through */ + case UPIU_TRANSACTION_NOP_OUT: +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index bc73181b0405..2b6853c7375c 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -5101,7 +5101,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, + err = ufshcd_enable_auto_bkops(hba); + else + err = ufshcd_disable_auto_bkops(hba); +- hba->urgent_bkops_lvl = curr_status; + out: + return err; + } +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c +index 29fbab55c3b3..01a17d84b606 100644 +--- a/drivers/slimbus/qcom-ngd-ctrl.c ++++ b/drivers/slimbus/qcom-ngd-ctrl.c +@@ -1354,7 +1354,6 @@ static int of_qcom_slim_ngd_register(struct device *parent, + ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; + ngd->pdev->dev.of_node = node; + ctrl->ngd = ngd; +- platform_set_drvdata(ngd->pdev, ctrl); + + platform_device_add(ngd->pdev); + ngd->base = ctrl->base + ngd->id * data->offset + +@@ -1369,12 +1368,13 @@ static int of_qcom_slim_ngd_register(struct device *parent, + + static int qcom_slim_ngd_probe(struct platform_device *pdev) + { +- struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; ++ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent); + int ret; + + ctrl->ctrl.dev = dev; + ++ platform_set_drvdata(pdev, ctrl); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); + pm_runtime_set_suspended(dev); +diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c +index 6473fa602f82..611f4f5bc36a 100644 +--- a/drivers/soundwire/slave.c ++++ b/drivers/soundwire/slave.c +@@ -57,6 +57,8 @@ static int sdw_slave_add(struct sdw_bus *bus, + list_del(&slave->node); + mutex_unlock(&bus->bus_lock); + put_device(&slave->dev); ++ ++ return ret; + } + sdw_slave_debugfs_init(slave); + +diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c +index 5f0e089573a2..af26bc9f184a 100644 +--- a/drivers/staging/gasket/gasket_sysfs.c ++++ b/drivers/staging/gasket/gasket_sysfs.c +@@ -339,6 +339,7 @@ void gasket_sysfs_put_attr(struct device *device, + + dev_err(device, "Unable to put unknown attribute: %s\n", + attr->attr.attr.name); ++ put_mapping(mapping); + } + EXPORT_SYMBOL(gasket_sysfs_put_attr); + +@@ -372,6 +373,7 @@ ssize_t gasket_sysfs_register_store(struct device *device, + gasket_dev = mapping->gasket_dev; + if (!gasket_dev) { + dev_err(device, "Device driver may have been removed\n"); ++ put_mapping(mapping); + return 0; + } + +diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c +index d6ba25f21d80..d2672b65c3f4 100644 +--- a/drivers/staging/greybus/light.c ++++ b/drivers/staging/greybus/light.c +@@ -1026,7 +1026,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id) + + light->channels_count = conf.channel_count; + light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); +- ++ if (!light->name) ++ return -ENOMEM; + light->channels = kcalloc(light->channels_count, + sizeof(struct gb_channel), GFP_KERNEL); + if (!light->channels) +diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c +index 59568d18ce23..5b72aa81d94c 
100644 +--- a/drivers/staging/sm750fb/sm750.c ++++ b/drivers/staging/sm750fb/sm750.c +@@ -898,6 +898,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) + fix->visual = FB_VISUAL_PSEUDOCOLOR; + break; + case 16: ++ case 24: + case 32: + fix->visual = FB_VISUAL_TRUECOLOR; + break; +diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c +index 77d0732f451b..221e3d93db14 100644 +--- a/drivers/staging/wilc1000/wilc_hif.c ++++ b/drivers/staging/wilc1000/wilc_hif.c +@@ -12,6 +12,8 @@ + #define WILC_FALSE_FRMWR_CHANNEL 100 + #define WILC_MAX_RATES_SUPPORTED 12 + ++#define WILC_SCAN_WID_LIST_SIZE 6 ++ + struct wilc_rcvd_mac_info { + u8 status; + }; +@@ -233,7 +235,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, + void *user_arg, struct cfg80211_scan_request *request) + { + int result = 0; +- struct wid wid_list[5]; ++ struct wid wid_list[WILC_SCAN_WID_LIST_SIZE]; + u32 index = 0; + u32 i, scan_timeout; + u8 *buffer; +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index 3305b47fdf53..16d5a4e117a2 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c +@@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd) + return 0; + } + +-static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) ++static int tcm_loop_queue_data_or_status(const char *func, ++ struct se_cmd *se_cmd, u8 scsi_status) + { + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + struct scsi_cmnd *sc = tl_cmd->sc; + + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", +- __func__, sc, sc->cmnd[0]); +- +- sc->result = SAM_STAT_GOOD; +- set_host_byte(sc, DID_OK); +- if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || +- (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) +- scsi_set_resid(sc, se_cmd->residual_count); +- sc->scsi_done(sc); +- return 0; +-} +- +-static int tcm_loop_queue_status(struct se_cmd *se_cmd) +-{ +- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, +- struct tcm_loop_cmd, tl_se_cmd); +- struct scsi_cmnd *sc = tl_cmd->sc; +- +- pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", +- __func__, sc, sc->cmnd[0]); ++ func, sc, sc->cmnd[0]); + + if (se_cmd->sense_buffer && + ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || +@@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) + sc->result = SAM_STAT_CHECK_CONDITION; + set_driver_byte(sc, DRIVER_SENSE); + } else +- sc->result = se_cmd->scsi_status; ++ sc->result = scsi_status; + + set_host_byte(sc, DID_OK); + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || +@@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) + return 0; + } + ++static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) ++{ ++ return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD); ++} ++ ++static int tcm_loop_queue_status(struct se_cmd *se_cmd) ++{ ++ return tcm_loop_queue_data_or_status(__func__, ++ se_cmd, se_cmd->scsi_status); ++} ++ + static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) + { + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index 9425354aef99..a497e7c1f4fc 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -882,41 +882,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, + return command_size; + } + +-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned 
int tmo, +- struct timer_list *timer) ++static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, ++ struct timer_list *timer) + { +- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; +- int cmd_id; +- +- if (tcmu_cmd->cmd_id) +- goto setup_timer; +- +- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); +- if (cmd_id < 0) { +- pr_err("tcmu: Could not allocate cmd id.\n"); +- return cmd_id; +- } +- tcmu_cmd->cmd_id = cmd_id; +- +- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, +- udev->name, tmo / MSEC_PER_SEC); +- +-setup_timer: + if (!tmo) +- return 0; ++ return; + + tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); + if (!timer_pending(timer)) + mod_timer(timer, tcmu_cmd->deadline); + +- return 0; ++ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, ++ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); + } + + static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) + { + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + unsigned int tmo; +- int ret; + + /* + * For backwards compat if qfull_time_out is not set use +@@ -931,13 +914,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) + else + tmo = TCMU_TIME_OUT; + +- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); +- if (ret) +- return ret; ++ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); + + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); +- pr_debug("adding cmd %u on dev %s to ring space wait queue\n", +- tcmu_cmd->cmd_id, udev->name); ++ pr_debug("adding cmd %p on dev %s to ring space wait queue\n", ++ tcmu_cmd, udev->name); + return 0; + } + +@@ -959,7 +940,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) + struct tcmu_mailbox *mb; + struct tcmu_cmd_entry *entry; + struct iovec *iov; +- int iov_cnt, ret; ++ int iov_cnt, cmd_id; + uint32_t cmd_head; + uint64_t cdb_off; + bool copy_to_data_area; +@@ -1060,14 +1041,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) + } + entry->req.iov_bidi_cnt = iov_cnt; + +- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, +- &udev->cmd_timer); +- if (ret) { +- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); ++ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); ++ if (cmd_id < 0) { ++ pr_err("tcmu: Could not allocate cmd id.\n"); + ++ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); + *scsi_err = TCM_OUT_OF_RESOURCES; + return -1; + } ++ tcmu_cmd->cmd_id = cmd_id; ++ ++ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, ++ tcmu_cmd, udev->name); ++ ++ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); ++ + entry->hdr.cmd_id = tcmu_cmd->cmd_id; + + /* +@@ -1279,50 +1267,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) + return handled; + } + +-static int tcmu_check_expired_cmd(int id, void *p, void *data) ++static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) + { +- struct tcmu_cmd *cmd = p; +- struct tcmu_dev *udev = cmd->tcmu_dev; +- u8 scsi_status; + struct se_cmd *se_cmd; +- bool is_running; +- +- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) +- return 0; + + if (!time_after(jiffies, cmd->deadline)) +- return 0; ++ return; + +- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); ++ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); ++ list_del_init(&cmd->queue_entry); + se_cmd = cmd->se_cmd; ++ cmd->se_cmd = NULL; + +- if (is_running) { +- /* +- * If cmd_time_out is disabled but qfull is set 
deadline +- * will only reflect the qfull timeout. Ignore it. +- */ +- if (!udev->cmd_time_out) +- return 0; ++ pr_debug("Timing out inflight cmd %u on dev %s.\n", ++ cmd->cmd_id, cmd->tcmu_dev->name); + +- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); +- /* +- * target_complete_cmd will translate this to LUN COMM FAILURE +- */ +- scsi_status = SAM_STAT_CHECK_CONDITION; +- list_del_init(&cmd->queue_entry); +- cmd->se_cmd = NULL; +- } else { +- list_del_init(&cmd->queue_entry); +- idr_remove(&udev->commands, id); +- tcmu_free_cmd(cmd); +- scsi_status = SAM_STAT_TASK_SET_FULL; +- } ++ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); ++} + +- pr_debug("Timing out cmd %u on dev %s that is %s.\n", +- id, udev->name, is_running ? "inflight" : "queued"); ++static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) ++{ ++ struct se_cmd *se_cmd; + +- target_complete_cmd(se_cmd, scsi_status); +- return 0; ++ if (!time_after(jiffies, cmd->deadline)) ++ return; ++ ++ pr_debug("Timing out queued cmd %p on dev %s.\n", ++ cmd, cmd->tcmu_dev->name); ++ ++ list_del_init(&cmd->queue_entry); ++ se_cmd = cmd->se_cmd; ++ tcmu_free_cmd(cmd); ++ ++ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); + } + + static void tcmu_device_timedout(struct tcmu_dev *udev) +@@ -1407,16 +1384,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) + return &udev->se_dev; + } + +-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) ++static void run_qfull_queue(struct tcmu_dev *udev, bool fail) + { + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + LIST_HEAD(cmds); +- bool drained = true; + sense_reason_t scsi_ret; + int ret; + + if (list_empty(&udev->qfull_queue)) +- return true; ++ return; + + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); + +@@ -1425,11 +1401,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { + list_del_init(&tcmu_cmd->queue_entry); + +- pr_debug("removing cmd %u on dev %s from queue\n", +- tcmu_cmd->cmd_id, udev->name); ++ pr_debug("removing cmd %p on dev %s from queue\n", ++ tcmu_cmd, udev->name); + + if (fail) { +- idr_remove(&udev->commands, tcmu_cmd->cmd_id); + /* + * We were not able to even start the command, so + * fail with busy to allow a retry in case runner +@@ -1444,10 +1419,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) + + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); + if (ret < 0) { +- pr_debug("cmd %u on dev %s failed with %u\n", +- tcmu_cmd->cmd_id, udev->name, scsi_ret); +- +- idr_remove(&udev->commands, tcmu_cmd->cmd_id); ++ pr_debug("cmd %p on dev %s failed with %u\n", ++ tcmu_cmd, udev->name, scsi_ret); + /* + * Ignore scsi_ret for now. target_complete_cmd + * drops it. 
+@@ -1462,13 +1435,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) + * the queue + */ + list_splice_tail(&cmds, &udev->qfull_queue); +- drained = false; + break; + } + } + + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); +- return drained; + } + + static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) +@@ -1652,6 +1623,8 @@ static void tcmu_dev_kref_release(struct kref *kref) + if (tcmu_check_and_free_pending_cmd(cmd) != 0) + all_expired = false; + } ++ if (!list_empty(&udev->qfull_queue)) ++ all_expired = false; + idr_destroy(&udev->commands); + WARN_ON(!all_expired); + +@@ -2037,9 +2010,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) + mutex_lock(&udev->cmdr_lock); + + idr_for_each_entry(&udev->commands, cmd, i) { +- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) +- continue; +- + pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", + cmd->cmd_id, udev->name, + test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); +@@ -2077,6 +2047,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) + + del_timer(&udev->cmd_timer); + ++ run_qfull_queue(udev, false); ++ + mutex_unlock(&udev->cmdr_lock); + } + +@@ -2698,6 +2670,7 @@ static void find_free_blocks(void) + static void check_timedout_devices(void) + { + struct tcmu_dev *udev, *tmp_dev; ++ struct tcmu_cmd *cmd, *tmp_cmd; + LIST_HEAD(devs); + + spin_lock_bh(&timed_out_udevs_lock); +@@ -2708,9 +2681,24 @@ static void check_timedout_devices(void) + spin_unlock_bh(&timed_out_udevs_lock); + + mutex_lock(&udev->cmdr_lock); +- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + +- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); ++ /* ++ * If cmd_time_out is disabled but qfull is set deadline ++ * will only reflect the qfull timeout. Ignore it. 
++ */ ++ if (udev->cmd_time_out) { ++ list_for_each_entry_safe(cmd, tmp_cmd, ++ &udev->inflight_queue, ++ queue_entry) { ++ tcmu_check_expired_ring_cmd(cmd); ++ } ++ tcmu_set_next_deadline(&udev->inflight_queue, ++ &udev->cmd_timer); ++ } ++ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, ++ queue_entry) { ++ tcmu_check_expired_queue_cmd(cmd); ++ } + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + + mutex_unlock(&udev->cmdr_lock); +diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +index d3e959d01606..85776db4bf34 100644 +--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c ++++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +@@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (!data || IS_ERR(data)) ++ if (!IS_ERR_OR_NULL(data)) + data = ti_thermal_build_data(bgp, id); + + if (!data) +@@ -196,7 +196,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (data && data->ti_thermal) { ++ if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { + if (data->our_zone) + thermal_zone_device_unregister(data->ti_thermal); + } +@@ -262,7 +262,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (data) { ++ if (!IS_ERR_OR_NULL(data)) { + cpufreq_cooling_unregister(data->cool_dev); + if (data->policy) + cpufreq_cpu_put(data->policy); +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index cdcc64ea2554..f8e43a6faea9 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -75,6 +75,8 @@ static LIST_HEAD(hvc_structs); + */ + static DEFINE_MUTEX(hvc_structs_mutex); + ++/* Mutex to serialize hvc_open */ ++static DEFINE_MUTEX(hvc_open_mutex); + /* + * This value is used to assign a tty->index value to a hvc_struct based + * upon order of exposure via hvc_probe(), when we can not match it to +@@ -346,16 +348,24 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty) + */ + static int hvc_open(struct tty_struct *tty, struct file * filp) + { +- struct hvc_struct *hp = tty->driver_data; ++ struct hvc_struct *hp; + unsigned long flags; + int rc = 0; + ++ mutex_lock(&hvc_open_mutex); ++ ++ hp = tty->driver_data; ++ if (!hp) { ++ rc = -EIO; ++ goto out; ++ } ++ + spin_lock_irqsave(&hp->port.lock, flags); + /* Check and then increment for fast path open. 
*/ + if (hp->port.count++ > 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + hvc_kick(); +- return 0; ++ goto out; + } /* else count == 0 */ + spin_unlock_irqrestore(&hp->port.lock, flags); + +@@ -383,6 +393,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + /* Force wakeup of the polling thread */ + hvc_kick(); + ++out: ++ mutex_unlock(&hvc_open_mutex); + return rc; + } + +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index 36a3eb4ad4c5..75408b9f232d 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -665,11 +665,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, + * FIXME: lock against link layer control transmissions + */ + +-static void gsm_data_kick(struct gsm_mux *gsm) ++static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) + { + struct gsm_msg *msg, *nmsg; + int len; +- int skip_sof = 0; + + list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { + if (gsm->constipated && msg->addr) +@@ -691,18 +690,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) + print_hex_dump_bytes("gsm_data_kick: ", + DUMP_PREFIX_OFFSET, + gsm->txframe, len); +- +- if (gsm->output(gsm, gsm->txframe + skip_sof, +- len - skip_sof) < 0) ++ if (gsm->output(gsm, gsm->txframe, len) < 0) + break; + /* FIXME: Can eliminate one SOF in many more cases */ + gsm->tx_bytes -= msg->len; +- /* For a burst of frames skip the extra SOF within the +- burst */ +- skip_sof = 1; + + list_del(&msg->list); + kfree(msg); ++ ++ if (dlci) { ++ tty_port_tty_wakeup(&dlci->port); ++ } else { ++ int i = 0; ++ ++ for (i = 0; i < NUM_DLCI; i++) ++ if (gsm->dlci[i]) ++ tty_port_tty_wakeup(&gsm->dlci[i]->port); ++ } + } + } + +@@ -754,7 +758,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) + /* Add to the actual output queue */ + list_add_tail(&msg->list, &gsm->tx_list); + gsm->tx_bytes += msg->len; +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, dlci); + } + + /** +@@ -1215,7 +1219,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, + gsm_control_reply(gsm, CMD_FCON, NULL, 0); + /* Kick the link in case it is idling */ + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + spin_unlock_irqrestore(&gsm->tx_lock, flags); + break; + case CMD_FCOFF: +@@ -2525,7 +2529,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) + /* Queue poll */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + if (gsm->tx_bytes < TX_THRESH_LO) { + gsm_dlci_data_sweep(gsm); + } +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 2c65c775bf5a..dbb27303a6b4 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2539,6 +2539,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, + struct ktermios *termios, + struct ktermios *old) + { ++ unsigned int tolerance = port->uartclk / 100; ++ + /* + * Ask the core to calculate the divisor for us. 
+ * Allow 1% tolerance at the upper limit so uart clks marginally +@@ -2547,7 +2549,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, + */ + return uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / UART_DIV_MAX, +- port->uartclk); ++ (port->uartclk + tolerance) / 16); + } + + void +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index b0b689546395..de3e8c24c03e 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2585,6 +2585,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + uap->port.fifosize = uap->fifosize; + uap->port.flags = UPF_BOOT_AUTOCONF; + uap->port.line = index; ++ spin_lock_init(&uap->port.lock); + + amba_ports[index] = uap; + +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 0d8e3f3804a3..084c48c5848f 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -468,7 +468,8 @@ static int usblp_release(struct inode *inode, struct file *file) + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ +- usblp_cleanup(usblp); ++ usblp_cleanup(usblp); /* any URBs must be dead */ ++ + mutex_unlock(&usblp_mutex); + return 0; + } +@@ -1375,9 +1376,11 @@ static void usblp_disconnect(struct usb_interface *intf) + + usblp_unlink_urbs(usblp); + mutex_unlock(&usblp->mut); ++ usb_poison_anchored_urbs(&usblp->urbs); + + if (!usblp->used) + usblp_cleanup(usblp); ++ + mutex_unlock(&usblp_mutex); + } + +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c +index 6af6add3d4c0..6272b4ae4740 100644 +--- a/drivers/usb/dwc2/core_intr.c ++++ b/drivers/usb/dwc2/core_intr.c +@@ -421,10 +421,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) + if (ret && (ret != -ENOTSUPP)) + dev_err(hsotg->dev, "exit power_down failed\n"); + ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + call_gadget(hsotg, resume); ++ } else { ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + } +- /* Change to L0 state */ +- hsotg->lx_state = DWC2_L0; + } else { + if (hsotg->params.power_down) + return; +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index c30c5b1c478c..17340864a540 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1217,6 +1217,8 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) + } + } + ++static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep); ++ + static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) + { + struct dwc3_gadget_ep_cmd_params params; +@@ -1256,14 +1258,20 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) + + ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); + if (ret < 0) { +- /* +- * FIXME we need to iterate over the list of requests +- * here and stop, unmap, free and del each of the linked +- * requests instead of what we do now. 
+- */ +- if (req->trb) +- memset(req->trb, 0, sizeof(struct dwc3_trb)); +- dwc3_gadget_del_and_unmap_request(dep, req, ret); ++ struct dwc3_request *tmp; ++ ++ if (ret == -EAGAIN) ++ return ret; ++ ++ dwc3_stop_active_transfer(dep, true, true); ++ ++ list_for_each_entry_safe(req, tmp, &dep->started_list, list) ++ dwc3_gadget_move_cancelled_request(req); ++ ++ /* If ep isn't started, then there's no end transfer pending */ ++ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) ++ dwc3_gadget_ep_cleanup_cancelled_requests(dep); ++ + return ret; + } + +@@ -1505,6 +1513,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r + { + int i; + ++ /* If req->trb is not set, then the request has not started */ ++ if (!req->trb) ++ return; ++ + /* + * If request was already started, this means we had to + * stop the transfer. With that we also need to ignore +@@ -1595,6 +1607,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) + { + struct dwc3_gadget_ep_cmd_params params; + struct dwc3 *dwc = dep->dwc; ++ struct dwc3_request *req; ++ struct dwc3_request *tmp; + int ret; + + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { +@@ -1631,13 +1645,37 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) + else + dep->flags |= DWC3_EP_STALL; + } else { ++ /* ++ * Don't issue CLEAR_STALL command to control endpoints. The ++ * controller automatically clears the STALL when it receives ++ * the SETUP token. ++ */ ++ if (dep->number <= 1) { ++ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); ++ return 0; ++ } + + ret = dwc3_send_clear_stall_ep_cmd(dep); +- if (ret) ++ if (ret) { + dev_err(dwc->dev, "failed to clear STALL on %s\n", + dep->name); +- else +- dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); ++ return ret; ++ } ++ ++ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); ++ ++ dwc3_stop_active_transfer(dep, true, true); ++ ++ list_for_each_entry_safe(req, tmp, &dep->started_list, list) ++ dwc3_gadget_move_cancelled_request(req); ++ ++ list_for_each_entry_safe(req, tmp, &dep->pending_list, list) ++ dwc3_gadget_move_cancelled_request(req); ++ ++ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) { ++ dep->flags &= ~DWC3_EP_DELAY_START; ++ dwc3_gadget_ep_cleanup_cancelled_requests(dep); ++ } + } + + return ret; +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index d98ca1566e95..f75ff1a75dc4 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f, + } + + /** +- * next_ep_desc() - advance to the next EP descriptor ++ * next_desc() - advance to the next desc_type descriptor + * @t: currect pointer within descriptor array ++ * @desc_type: descriptor type + * +- * Return: next EP descriptor or NULL ++ * Return: next desc_type descriptor or NULL + * +- * Iterate over @t until either EP descriptor found or ++ * Iterate over @t until either desc_type descriptor found or + * NULL (that indicates end of list) encountered + */ + static struct usb_descriptor_header** +-next_ep_desc(struct usb_descriptor_header **t) ++next_desc(struct usb_descriptor_header **t, u8 desc_type) + { + for (; *t; t++) { +- if ((*t)->bDescriptorType == USB_DT_ENDPOINT) ++ if ((*t)->bDescriptorType == desc_type) + return t; + } + return NULL; + } + + /* +- * for_each_ep_desc()- iterate over endpoint descriptors in the +- * descriptors list +- * @start: pointer within descriptor array. 
+- * @ep_desc: endpoint descriptor to use as the loop cursor ++ * for_each_desc() - iterate over desc_type descriptors in the ++ * descriptors list ++ * @start: pointer within descriptor array. ++ * @iter_desc: desc_type descriptor to use as the loop cursor ++ * @desc_type: wanted descriptr type + */ +-#define for_each_ep_desc(start, ep_desc) \ +- for (ep_desc = next_ep_desc(start); \ +- ep_desc; ep_desc = next_ep_desc(ep_desc+1)) ++#define for_each_desc(start, iter_desc, desc_type) \ ++ for (iter_desc = next_desc(start, desc_type); \ ++ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type)) + + /** +- * config_ep_by_speed() - configures the given endpoint ++ * config_ep_by_speed_and_alt() - configures the given endpoint + * according to gadget speed. + * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure ++ * @alt: alternate setting number + * + * Return: error code, 0 on success + * +@@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t) + * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +-int config_ep_by_speed(struct usb_gadget *g, +- struct usb_function *f, +- struct usb_ep *_ep) ++int config_ep_by_speed_and_alt(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep, ++ u8 alt) + { + struct usb_endpoint_descriptor *chosen_desc = NULL; ++ struct usb_interface_descriptor *int_desc = NULL; + struct usb_descriptor_header **speed_desc = NULL; + + struct usb_ss_ep_comp_descriptor *comp_desc = NULL; +@@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g, + default: + speed_desc = f->fs_descriptors; + } ++ ++ /* find correct alternate setting descriptor */ ++ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { ++ int_desc = (struct usb_interface_descriptor *)*d_spd; ++ ++ if (int_desc->bAlternateSetting == alt) { ++ speed_desc = d_spd; ++ goto intf_found; ++ } ++ } ++ return -EIO; ++ ++intf_found: + /* find descriptors */ +- for_each_ep_desc(speed_desc, d_spd) { ++ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { + chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; + if (chosen_desc->bEndpointAddress == _ep->address) + goto ep_found; +@@ -237,6 +255,32 @@ ep_found: + } + return 0; + } ++EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); ++ ++/** ++ * config_ep_by_speed() - configures the given endpoint ++ * according to gadget speed. ++ * @g: pointer to the gadget ++ * @f: usb function ++ * @_ep: the endpoint to configure ++ * ++ * Return: error code, 0 on success ++ * ++ * This function chooses the right descriptors for a given ++ * endpoint according to gadget speed and saves it in the ++ * endpoint desc field. If the endpoint already has a descriptor ++ * assigned to it - overwrites it with currently corresponding ++ * descriptor. The endpoint maxpacket field is updated according ++ * to the chosen descriptor. 
++ * Note: the supplied function should hold all the descriptors ++ * for supported speeds ++ */ ++int config_ep_by_speed(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep) ++{ ++ return config_ep_by_speed_and_alt(g, f, _ep, 0); ++} + EXPORT_SYMBOL_GPL(config_ep_by_speed); + + /** +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 51fa614b4079..0e7820158aaa 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -1297,6 +1297,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) + kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); + + usb_gadget_disconnect(udc->gadget); ++ if (udc->gadget->irq) ++ synchronize_irq(udc->gadget->irq); + udc->driver->unbind(udc->gadget); + usb_gadget_udc_stop(udc); + +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c +index bf6c81e2f8cc..6d2f1f98f13d 100644 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c +@@ -1614,17 +1614,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + u16 maxpacket; + u32 tmp; + unsigned long flags; + + /* Verify EP data */ + if ((!_ep) || (!ep) || (!desc) || +- (desc->bDescriptorType != USB_DT_ENDPOINT)) { +- dev_dbg(udc->dev, "bad ep or descriptor\n"); ++ (desc->bDescriptorType != USB_DT_ENDPOINT)) + return -EINVAL; +- } ++ ++ udc = ep->udc; + maxpacket = usb_endpoint_maxp(desc); + if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { + dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); +@@ -1872,7 +1872,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) + static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + unsigned long flags; + + if ((!ep) || (ep->hwep_num <= 1)) +@@ -1882,6 +1882,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + if (ep->is_in) + return -EAGAIN; + ++ udc = ep->udc; + spin_lock_irqsave(&udc->lock, flags); + + if (value == 1) { +diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c +index a8288df6aadf..ea59b56e5402 100644 +--- a/drivers/usb/gadget/udc/m66592-udc.c ++++ b/drivers/usb/gadget/udc/m66592-udc.c +@@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev) + + err_add_udc: + m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); +- ++ m66592->ep0_req = NULL; + clean_up3: + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c +index f82208fbc249..5dcc0692b95c 100644 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c +@@ -251,10 +251,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, + static void s3c2410_udc_nuke(struct s3c2410_udc *udc, + struct s3c2410_ep *ep, int status) + { +- /* Sanity check */ +- if (&ep->queue == NULL) +- return; +- + while (!list_empty(&ep->queue)) { + struct s3c2410_request *req; + req = list_entry(ep->queue.next, struct s3c2410_request, +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c +index c9f91e6c72b6..7f65c86047dd 100644 +--- a/drivers/usb/host/ehci-mxc.c ++++ b/drivers/usb/host/ehci-mxc.c +@@ -50,6 +50,8 @@ static int 
ehci_mxc_drv_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; + + hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); + if (!hcd) +diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c +index 769749ca5961..e9a49007cce4 100644 +--- a/drivers/usb/host/ehci-platform.c ++++ b/drivers/usb/host/ehci-platform.c +@@ -29,6 +29,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -44,6 +46,9 @@ struct ehci_platform_priv { + struct clk *clks[EHCI_MAX_CLKS]; + struct reset_control *rsts; + bool reset_on_resume; ++ bool quirk_poll; ++ struct timer_list poll_timer; ++ struct delayed_work poll_work; + }; + + static const char hcd_name[] = "ehci-platform"; +@@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = { + .power_off = ehci_platform_power_off, + }; + ++/** ++ * quirk_poll_check_port_status - Poll port_status if the device sticks ++ * @ehci: the ehci hcd pointer ++ * ++ * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting ++ * stuck very rarely after a full/low usb device was disconnected. To ++ * detect such a situation, the controllers require a special way which poll ++ * the EHCI PORTSC register. ++ * ++ * Return: true if the controller's port_status indicated getting stuck ++ */ ++static bool quirk_poll_check_port_status(struct ehci_hcd *ehci) ++{ ++ u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); ++ ++ if (!(port_status & PORT_OWNER) && ++ (port_status & PORT_POWER) && ++ !(port_status & PORT_CONNECT) && ++ (port_status & PORT_LS_MASK)) ++ return true; ++ ++ return false; ++} ++ ++/** ++ * quirk_poll_rebind_companion - rebind comanion device to recover ++ * @ehci: the ehci hcd pointer ++ * ++ * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting ++ * stuck very rarely after a full/low usb device was disconnected. To ++ * recover from such a situation, the controllers require changing the OHCI ++ * functional state. ++ */ ++static void quirk_poll_rebind_companion(struct ehci_hcd *ehci) ++{ ++ struct device *companion_dev; ++ struct usb_hcd *hcd = ehci_to_hcd(ehci); ++ ++ companion_dev = usb_of_get_companion_dev(hcd->self.controller); ++ if (!companion_dev) ++ return; ++ ++ device_release_driver(companion_dev); ++ if (device_attach(companion_dev) < 0) ++ ehci_err(ehci, "%s: failed\n", __func__); ++ ++ put_device(companion_dev); ++} ++ ++static void quirk_poll_work(struct work_struct *work) ++{ ++ struct ehci_platform_priv *priv = ++ container_of(to_delayed_work(work), struct ehci_platform_priv, ++ poll_work); ++ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, ++ priv); ++ ++ /* check the status twice to reduce misdetection rate */ ++ if (!quirk_poll_check_port_status(ehci)) ++ return; ++ udelay(10); ++ if (!quirk_poll_check_port_status(ehci)) ++ return; ++ ++ ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__); ++ quirk_poll_rebind_companion(ehci); ++} ++ ++static void quirk_poll_timer(struct timer_list *t) ++{ ++ struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); ++ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, ++ priv); ++ ++ if (quirk_poll_check_port_status(ehci)) { ++ /* ++ * Now scheduling the work for testing the port more. Note that ++ * updating the status is possible to be delayed when ++ * reconnection. So, this uses delayed work with 5 ms delay ++ * to avoid misdetection. 
++ */ ++ schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5)); ++ } ++ ++ mod_timer(&priv->poll_timer, jiffies + HZ); ++} ++ ++static void quirk_poll_init(struct ehci_platform_priv *priv) ++{ ++ INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work); ++ timer_setup(&priv->poll_timer, quirk_poll_timer, 0); ++ mod_timer(&priv->poll_timer, jiffies + HZ); ++} ++ ++static void quirk_poll_end(struct ehci_platform_priv *priv) ++{ ++ del_timer_sync(&priv->poll_timer); ++ cancel_delayed_work(&priv->poll_work); ++} ++ ++static const struct soc_device_attribute quirk_poll_match[] = { ++ { .family = "R-Car Gen3" }, ++ { /* sentinel*/ } ++}; ++ + static int ehci_platform_probe(struct platform_device *dev) + { + struct usb_hcd *hcd; +@@ -176,6 +286,9 @@ static int ehci_platform_probe(struct platform_device *dev) + "has-transaction-translator")) + hcd->has_tt = 1; + ++ if (soc_device_match(quirk_poll_match)) ++ priv->quirk_poll = true; ++ + for (clk = 0; clk < EHCI_MAX_CLKS; clk++) { + priv->clks[clk] = of_clk_get(dev->dev.of_node, clk); + if (IS_ERR(priv->clks[clk])) { +@@ -247,6 +360,9 @@ static int ehci_platform_probe(struct platform_device *dev) + device_enable_async_suspend(hcd->self.controller); + platform_set_drvdata(dev, hcd); + ++ if (priv->quirk_poll) ++ quirk_poll_init(priv); ++ + return err; + + err_power: +@@ -273,6 +389,9 @@ static int ehci_platform_remove(struct platform_device *dev) + struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); + int clk; + ++ if (priv->quirk_poll) ++ quirk_poll_end(priv); ++ + usb_remove_hcd(hcd); + + if (pdata->power_off) +@@ -297,9 +416,13 @@ static int ehci_platform_suspend(struct device *dev) + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct usb_ehci_pdata *pdata = dev_get_platdata(dev); + struct platform_device *pdev = to_platform_device(dev); ++ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); + bool do_wakeup = device_may_wakeup(dev); + int ret; + ++ if (priv->quirk_poll) ++ quirk_poll_end(priv); ++ + ret = ehci_suspend(hcd, do_wakeup); + if (ret) + return ret; +@@ -331,6 +454,14 @@ static int ehci_platform_resume(struct device *dev) + } + + ehci_resume(hcd, priv->reset_on_resume); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ ++ if (priv->quirk_poll) ++ quirk_poll_init(priv); ++ + return 0; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c +index 7addfc2cbadc..4a8456f12a73 100644 +--- a/drivers/usb/host/ohci-platform.c ++++ b/drivers/usb/host/ohci-platform.c +@@ -299,6 +299,11 @@ static int ohci_platform_resume(struct device *dev) + } + + ohci_resume(hcd, false); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ + return 0; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c +index c158cda9e4b9..cff965240327 100644 +--- a/drivers/usb/host/ohci-sm501.c ++++ b/drivers/usb/host/ohci-sm501.c +@@ -157,9 +157,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev) + * the call to usb_hcd_setup_local_mem() below does just that. 
+ */ + +- if (usb_hcd_setup_local_mem(hcd, mem->start, +- mem->start - mem->parent->start, +- resource_size(mem)) < 0) ++ retval = usb_hcd_setup_local_mem(hcd, mem->start, ++ mem->start - mem->parent->start, ++ resource_size(mem)); ++ if (retval < 0) + goto err5; + retval = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (retval) +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 52c625c02341..60d06e9b600f 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -410,7 +410,15 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) + if (ret) + return ret; + +- return xhci_resume(xhci, 0); ++ ret = xhci_resume(xhci, 0); ++ if (ret) ++ return ret; ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ ++ return 0; + } + + static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) +diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c +index 7570c7602ab4..f32c582611eb 100644 +--- a/drivers/vfio/mdev/mdev_sysfs.c ++++ b/drivers/vfio/mdev/mdev_sysfs.c +@@ -110,7 +110,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, + "%s-%s", dev_driver_string(parent->dev), + group->name); + if (ret) { +- kfree(type); ++ kobject_put(&type->kobj); + return ERR_PTR(ret); + } + +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index f0891bd8444c..d6359c37c9e5 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -1460,7 +1460,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) + if (ret) + return ret; + +- if (cap <= PCI_CAP_ID_MAX) { ++ /* ++ * ID 0 is a NULL capability, conflicting with our fake ++ * PCI_CAP_ID_BASIC. As it has no content, consider it ++ * hidden for now. 
++ */ ++ if (cap && cap <= PCI_CAP_ID_MAX) { + len = pci_cap_length[cap]; + if (len == 0xFF) { /* Variable length */ + len = vfio_cap_len(vdev, cap, pos); +@@ -1726,8 +1731,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) + vdev->vconfig = NULL; + kfree(vdev->pci_config_map); + vdev->pci_config_map = NULL; +- kfree(vdev->msi_perm); +- vdev->msi_perm = NULL; ++ if (vdev->msi_perm) { ++ free_perm_bits(vdev->msi_perm); ++ kfree(vdev->msi_perm); ++ vdev->msi_perm = NULL; ++ } + } + + /* +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index a9caf1bc3c3e..88ce114790d7 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -2290,6 +2290,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { + static const struct target_core_fabric_ops vhost_scsi_ops = { + .module = THIS_MODULE, + .fabric_name = "vhost", ++ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS, + .tpg_get_wwn = vhost_scsi_get_fabric_wwn, + .tpg_get_tag = vhost_scsi_get_tpgt, + .tpg_check_demo_mode = vhost_scsi_check_true, +diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c +index f68920131a4a..e94932c69f54 100644 +--- a/drivers/video/backlight/lp855x_bl.c ++++ b/drivers/video/backlight/lp855x_bl.c +@@ -456,7 +456,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = regulator_enable(lp->enable); + if (ret < 0) { + dev_err(lp->dev, "failed to enable vddio: %d\n", ret); +- return ret; ++ goto disable_supply; + } + + /* +@@ -471,24 +471,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = lp855x_configure(lp); + if (ret) { + dev_err(lp->dev, "device config err: %d", ret); +- return ret; ++ goto disable_vddio; + } + + ret = lp855x_backlight_register(lp); + if (ret) { + dev_err(lp->dev, + "failed to register backlight. err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); + if (ret) { + dev_err(lp->dev, "failed to register sysfs. 
err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + backlight_update_status(lp->bl); ++ + return 0; ++ ++disable_vddio: ++ if (lp->enable) ++ regulator_disable(lp->enable); ++disable_supply: ++ if (lp->supply) ++ regulator_disable(lp->supply); ++ ++ return ret; + } + + static int lp855x_remove(struct i2c_client *cl) +@@ -497,6 +507,8 @@ static int lp855x_remove(struct i2c_client *cl) + + lp->bl->props.brightness = 0; + backlight_update_status(lp->bl); ++ if (lp->enable) ++ regulator_disable(lp->enable); + if (lp->supply) + regulator_disable(lp->supply); + sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); +diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c +index e92f38fcb7a4..1b9bcfed39e9 100644 +--- a/drivers/watchdog/da9062_wdt.c ++++ b/drivers/watchdog/da9062_wdt.c +@@ -55,11 +55,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, + unsigned int regval) + { + struct da9062 *chip = wdt->hw; +- int ret; +- +- ret = da9062_reset_watchdog_timer(wdt); +- if (ret) +- return ret; + + regmap_update_bits(chip->regmap, + DA9062AA_CONTROL_D, +diff --git a/fs/afs/dir.c b/fs/afs/dir.c +index d1e1caa23c8b..3c486340b220 100644 +--- a/fs/afs/dir.c ++++ b/fs/afs/dir.c +@@ -658,7 +658,8 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, + + cookie->ctx.actor = afs_lookup_filldir; + cookie->name = dentry->d_name; +- cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */ ++ cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want ++ * and slot 1 for the directory */ + + read_seqlock_excl(&dvnode->cb_lock); + dcbi = rcu_dereference_protected(dvnode->cb_interest, +@@ -709,7 +710,11 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, + if (!cookie->inodes) + goto out_s; + +- for (i = 1; i < cookie->nr_fids; i++) { ++ cookie->fids[1] = dvnode->fid; ++ cookie->statuses[1].cb_break = afs_calc_vnode_cb_break(dvnode); ++ cookie->inodes[1] = igrab(&dvnode->vfs_inode); ++ ++ for (i = 2; i < cookie->nr_fids; i++) { + scb = &cookie->statuses[i]; + + /* Find any inodes that already exist and get their +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c +index 0a4fed9e706b..5c2729fc07e5 100644 +--- a/fs/afs/fsclient.c ++++ b/fs/afs/fsclient.c +@@ -56,16 +56,15 @@ static void xdr_dump_bad(const __be32 *bp) + /* + * decode an AFSFetchStatus block + */ +-static int xdr_decode_AFSFetchStatus(const __be32 **_bp, +- struct afs_call *call, +- struct afs_status_cb *scb) ++static void xdr_decode_AFSFetchStatus(const __be32 **_bp, ++ struct afs_call *call, ++ struct afs_status_cb *scb) + { + const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; + struct afs_file_status *status = &scb->status; + bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); + u64 data_version, size; + u32 type, abort_code; +- int ret; + + abort_code = ntohl(xdr->abort_code); + +@@ -79,7 +78,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + */ + status->abort_code = abort_code; + scb->have_error = true; +- goto good; ++ goto advance; + } + + pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); +@@ -89,7 +88,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + if (abort_code != 0 && inline_error) { + status->abort_code = abort_code; + scb->have_error = true; +- goto good; ++ goto advance; + } + + type = ntohl(xdr->type); +@@ -125,15 +124,13 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + data_version |= 
(u64)ntohl(xdr->data_version_hi) << 32; + status->data_version = data_version; + scb->have_status = true; +-good: +- ret = 0; + advance: + *_bp = (const void *)*_bp + sizeof(*xdr); +- return ret; ++ return; + + bad: + xdr_dump_bad(*_bp); +- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + goto advance; + } + +@@ -254,9 +251,7 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSCallBack(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + +@@ -419,9 +414,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSCallBack(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + +@@ -579,12 +572,8 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call) + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_AFSFid(&bp, call->out_fid); +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSCallBack(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + +@@ -693,9 +682,7 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -786,12 +773,8 @@ static int afs_deliver_fs_link(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -880,12 +863,8 @@ static int afs_deliver_fs_symlink(struct afs_call *call) + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_AFSFid(&bp, call->out_fid); +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -988,16 +967,12 @@ static int afs_deliver_fs_rename(struct afs_call *call) + if (ret < 0) + return ret; + ++ bp = call->buffer; + /* If the two dirs are the same, we have two copies of the same status + * report, so we just 
decode it twice. + */ +- bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -1105,9 +1080,7 @@ static int afs_deliver_fs_store_data(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -1285,9 +1258,7 @@ static int afs_deliver_fs_store_status(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -1956,9 +1927,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSCallBack(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + +@@ -2064,10 +2033,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) + + bp = call->buffer; + scb = &call->out_scb[call->count]; +- ret = xdr_decode_AFSFetchStatus(&bp, call, scb); +- if (ret < 0) +- return ret; +- ++ xdr_decode_AFSFetchStatus(&bp, call, scb); + call->count++; + if (call->count < call->count2) + goto more_counts; +@@ -2245,9 +2211,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + call->unmarshall++; +@@ -2328,9 +2292,7 @@ static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +diff --git a/fs/afs/inode.c b/fs/afs/inode.c +index 46d2d7cb461d..a74e8e209454 100644 +--- a/fs/afs/inode.c ++++ b/fs/afs/inode.c +@@ -171,6 +171,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, + struct timespec64 t; + umode_t mode; + bool data_changed = false; ++ bool change_size = false; + + BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); + +@@ -226,6 +227,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, + } else { + set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); + } ++ change_size = true; + } else if (vnode->status.type == AFS_FTYPE_DIR) { + /* Expected directory change is handled elsewhere so + * that we can locally edit the directory and save on a +@@ -233,11 +235,19 @@ static void afs_apply_status(struct afs_fs_cursor *fc, + */ + if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) + data_changed = false; ++ change_size = true; + } + + if 
(data_changed) { + inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); +- afs_set_i_size(vnode, status->size); ++ ++ /* Only update the size if the data version jumped. If the ++ * file is being modified locally, then we might have our own ++ * idea of what the size should be that's not the same as ++ * what's on the server. ++ */ ++ if (change_size) ++ afs_set_i_size(vnode, status->size); + } + } + +diff --git a/fs/afs/internal.h b/fs/afs/internal.h +index 485cc3b2aaa8..555ad7c9afcb 100644 +--- a/fs/afs/internal.h ++++ b/fs/afs/internal.h +@@ -161,6 +161,7 @@ struct afs_call { + bool upgrade; /* T to request service upgrade */ + bool have_reply_time; /* T if have got reply_time */ + bool intr; /* T if interruptible */ ++ bool unmarshalling_error; /* T if an unmarshalling error occurred */ + u16 service_id; /* Actual service ID (after upgrade) */ + unsigned int debug_id; /* Trace ID */ + u32 operation_ID; /* operation ID for an incoming call */ +diff --git a/fs/afs/misc.c b/fs/afs/misc.c +index 52b19e9c1535..5334f1bd2bca 100644 +--- a/fs/afs/misc.c ++++ b/fs/afs/misc.c +@@ -83,6 +83,7 @@ int afs_abort_to_error(u32 abort_code) + case UAENOLCK: return -ENOLCK; + case UAENOTEMPTY: return -ENOTEMPTY; + case UAELOOP: return -ELOOP; ++ case UAEOVERFLOW: return -EOVERFLOW; + case UAENOMEDIUM: return -ENOMEDIUM; + case UAEDQUOT: return -EDQUOT; + +diff --git a/fs/afs/proc.c b/fs/afs/proc.c +index fba2ec3a3a9c..106b27011f6d 100644 +--- a/fs/afs/proc.c ++++ b/fs/afs/proc.c +@@ -562,6 +562,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames) + if (sysnames->subs[i] != afs_init_sysname && + sysnames->subs[i] != sysnames->blank) + kfree(sysnames->subs[i]); ++ kfree(sysnames); + } + } + +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c +index 52aa90fb4fbd..6adab30a8399 100644 +--- a/fs/afs/rxrpc.c ++++ b/fs/afs/rxrpc.c +@@ -540,6 +540,8 @@ static void afs_deliver_to_call(struct afs_call *call) + + ret = call->type->deliver(call); + state = READ_ONCE(call->state); ++ if (ret == 0 && call->unmarshalling_error) ++ ret = -EBADMSG; + switch (ret) { + case 0: + afs_queue_call_work(call); +@@ -963,5 +965,7 @@ noinline int afs_protocol_error(struct afs_call *call, int error, + enum afs_eproto_cause cause) + { + trace_afs_protocol_error(call, error, cause); ++ if (call) ++ call->unmarshalling_error = true; + return error; + } +diff --git a/fs/afs/write.c b/fs/afs/write.c +index cb76566763db..96b042af6248 100644 +--- a/fs/afs/write.c ++++ b/fs/afs/write.c +@@ -194,11 +194,11 @@ int afs_write_end(struct file *file, struct address_space *mapping, + + i_size = i_size_read(&vnode->vfs_inode); + if (maybe_i_size > i_size) { +- spin_lock(&vnode->wb_lock); ++ write_seqlock(&vnode->cb_lock); + i_size = i_size_read(&vnode->vfs_inode); + if (maybe_i_size > i_size) + i_size_write(&vnode->vfs_inode, maybe_i_size); +- spin_unlock(&vnode->wb_lock); ++ write_sequnlock(&vnode->cb_lock); + } + + if (!PageUptodate(page)) { +@@ -811,6 +811,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) + vmf->page->index, priv); + SetPagePrivate(vmf->page); + set_page_private(vmf->page, priv); ++ file_update_time(file); + + sb_end_pagefault(inode->i_sb); + return VM_FAULT_LOCKED; +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c +index 8af7f093305d..d21cf61d86b9 100644 +--- a/fs/afs/yfsclient.c ++++ b/fs/afs/yfsclient.c +@@ -179,21 +179,20 @@ static void xdr_dump_bad(const __be32 *bp) + /* + * Decode a YFSFetchStatus block + */ +-static int xdr_decode_YFSFetchStatus(const __be32 **_bp, +- struct afs_call *call, +- struct 
afs_status_cb *scb) ++static void xdr_decode_YFSFetchStatus(const __be32 **_bp, ++ struct afs_call *call, ++ struct afs_status_cb *scb) + { + const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; + struct afs_file_status *status = &scb->status; + u32 type; +- int ret; + + status->abort_code = ntohl(xdr->abort_code); + if (status->abort_code != 0) { + if (status->abort_code == VNOVNODE) + status->nlink = 0; + scb->have_error = true; +- goto good; ++ goto advance; + } + + type = ntohl(xdr->type); +@@ -221,15 +220,13 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, + status->size = xdr_to_u64(xdr->size); + status->data_version = xdr_to_u64(xdr->data_version); + scb->have_status = true; +-good: +- ret = 0; + advance: + *_bp += xdr_size(xdr); +- return ret; ++ return; + + bad: + xdr_dump_bad(*_bp); +- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + goto advance; + } + +@@ -348,9 +345,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSCallBack(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + +@@ -372,9 +367,7 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -534,9 +527,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSCallBack(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + +@@ -645,12 +636,8 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call) + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_YFSFid(&bp, call->out_fid); +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSCallBack(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + +@@ -803,14 +790,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; +- ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSFid(&bp, &fid); +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + /* Was deleted if vnode->status.abort_code == VNOVNODE. 
*/ + + xdr_decode_YFSVolSync(&bp, call->out_volsync); +@@ -890,10 +872,7 @@ static int yfs_deliver_fs_remove(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; +- ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + return 0; + } +@@ -975,12 +954,8 @@ static int yfs_deliver_fs_link(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + _leave(" = 0 [done]"); + return 0; +@@ -1062,12 +1037,8 @@ static int yfs_deliver_fs_symlink(struct afs_call *call) + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_YFSFid(&bp, call->out_fid); +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +@@ -1155,13 +1126,11 @@ static int yfs_deliver_fs_rename(struct afs_call *call) + return ret; + + bp = call->buffer; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); +- if (ret < 0) +- return ret; +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- ++ /* If the two dirs are the same, we have two copies of the same status ++ * report, so we just decode it twice. 
++ */ ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + _leave(" = 0 [done]"); + return 0; +@@ -1846,9 +1815,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) + + bp = call->buffer; + scb = &call->out_scb[call->count]; +- ret = xdr_decode_YFSFetchStatus(&bp, call, scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, scb); + + call->count++; + if (call->count < call->count2) +@@ -2068,9 +2035,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) + bp = call->buffer; + yacl->inherit_flag = ntohl(*bp++); + yacl->num_cleaned = ntohl(*bp++); +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; ++ xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSVolSync(&bp, call->out_volsync); + + call->unmarshall++; +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 34644ce4b502..2dc9c73a4cb2 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -1546,10 +1546,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + */ + if (!for_part) { + ret = devcgroup_inode_permission(bdev->bd_inode, perm); +- if (ret != 0) { +- bdput(bdev); ++ if (ret != 0) + return ret; +- } + } + + restart: +@@ -1618,8 +1616,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + goto out_clear; + BUG_ON(for_part); + ret = __blkdev_get(whole, mode, 1); +- if (ret) ++ if (ret) { ++ bdput(whole); + goto out_clear; ++ } + bdev->bd_contains = whole; + bdev->bd_part = disk_get_part(disk, partno); + if (!(disk->flags & GENHD_FL_UP) || +@@ -1669,7 +1669,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + disk_unblock_events(disk); + put_disk_and_module(disk); + out: +- bdput(bdev); + + return ret; + } +@@ -1736,6 +1735,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) + bdput(whole); + } + ++ if (res) ++ bdput(bdev); ++ + return res; + } + EXPORT_SYMBOL(blkdev_get); +diff --git a/fs/ceph/export.c b/fs/ceph/export.c +index 79dc06881e78..e088843a7734 100644 +--- a/fs/ceph/export.c ++++ b/fs/ceph/export.c +@@ -172,9 +172,16 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino) + static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) + { + struct inode *inode = __lookup_inode(sb, ino); ++ int err; ++ + if (IS_ERR(inode)) + return ERR_CAST(inode); +- if (inode->i_nlink == 0) { ++ /* We need LINK caps to reliably check i_nlink */ ++ err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false); ++ if (err) ++ return ERR_PTR(err); ++ /* -ESTALE if inode as been unlinked and no file is open */ ++ if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) { + iput(inode); + return ERR_PTR(-ESTALE); + } +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 721b2560caa7..947c4aad5d6a 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -614,26 +614,26 @@ cifs_reconnect(struct TCP_Server_Info *server) + try_to_freeze(); + + mutex_lock(&server->srv_mutex); ++#ifdef CONFIG_CIFS_DFS_UPCALL + /* + * Set up next DFS target server (if any) for reconnect. If DFS + * feature is disabled, then we will retry last server we + * connected to before. 
+ */ ++ reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); ++#endif ++ rc = reconn_set_ipaddr(server); ++ if (rc) { ++ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", ++ __func__, rc); ++ } ++ + if (cifs_rdma_enabled(server)) + rc = smbd_reconnect(server); + else + rc = generic_ip_connect(server); + if (rc) { + cifs_dbg(FYI, "reconnect error %d\n", rc); +-#ifdef CONFIG_CIFS_DFS_UPCALL +- reconn_inval_dfs_target(server, cifs_sb, &tgt_list, +- &tgt_it); +-#endif +- rc = reconn_set_ipaddr(server); +- if (rc) { +- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", +- __func__, rc); +- } + mutex_unlock(&server->srv_mutex); + msleep(3000); + } else { +diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h +index 416d9de35679..4311d01b02a8 100644 +--- a/fs/dlm/dlm_internal.h ++++ b/fs/dlm/dlm_internal.h +@@ -97,7 +97,6 @@ do { \ + __LINE__, __FILE__, #x, jiffies); \ + {do} \ + printk("\n"); \ +- BUG(); \ + panic("DLM: Record message above and reboot.\n"); \ + } \ + } +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c +index 2743c6f8a457..0589e914663f 100644 +--- a/fs/ext4/dir.c ++++ b/fs/ext4/dir.c +@@ -677,6 +677,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, + struct qstr qstr = {.name = str, .len = len }; + const struct dentry *parent = READ_ONCE(dentry->d_parent); + const struct inode *inode = READ_ONCE(parent->d_inode); ++ char strbuf[DNAME_INLINE_LEN]; + + if (!inode || !IS_CASEFOLDED(inode) || + !EXT4_SB(inode->i_sb)->s_encoding) { +@@ -685,6 +686,21 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, + return memcmp(str, name->name, len); + } + ++ /* ++ * If the dentry name is stored in-line, then it may be concurrently ++ * modified by a rename. If this happens, the VFS will eventually retry ++ * the lookup, so it doesn't matter what ->d_compare() returns. ++ * However, it's unsafe to call utf8_strncasecmp() with an unstable ++ * string. Therefore, we have to copy the name into a temporary buffer. ++ */ ++ if (len <= DNAME_INLINE_LEN - 1) { ++ memcpy(strbuf, str, len); ++ strbuf[len] = 0; ++ qstr.name = strbuf; ++ /* prevent compiler from optimizing out the temporary buffer */ ++ barrier(); ++ } ++ + return ext4_ci_compare(inode, name, &qstr, false); + } + +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 9bd44588eb77..3193f0b4a02d 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -3010,7 +3010,7 @@ again: + * in use to avoid freeing it when removing blocks. + */ + if (sbi->s_cluster_ratio > 1) { +- pblk = ext4_ext_pblock(ex) + end - ee_block + 2; ++ pblk = ext4_ext_pblock(ex) + end - ee_block + 1; + partial.pclu = EXT4_B2C(sbi, pblk); + partial.state = nofree; + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index d3500eaf900e..f7c20bb20da3 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2034,6 +2034,16 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, + #endif + } else if (token == Opt_dax) { + #ifdef CONFIG_FS_DAX ++ if (is_remount && test_opt(sb, DAX)) { ++ ext4_msg(sb, KERN_ERR, "can't mount with " ++ "both data=journal and dax"); ++ return -1; ++ } ++ if (is_remount && !(sbi->s_mount_opt & EXT4_MOUNT_DAX)) { ++ ext4_msg(sb, KERN_ERR, "can't change " ++ "dax mount option while remounting"); ++ return -1; ++ } + ext4_msg(sb, KERN_WARNING, + "DAX enabled. 
Warning: EXPERIMENTAL, use at your own risk"); + sbi->s_mount_opt |= m->mount_opt; +@@ -2294,6 +2304,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, + ext4_msg(sb, KERN_ERR, "revision level too high, " + "forcing read-only mode"); + err = -EROFS; ++ goto done; + } + if (read_only) + goto done; +@@ -5366,12 +5377,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) + err = -EINVAL; + goto restore_opts; + } +- if (test_opt(sb, DAX)) { +- ext4_msg(sb, KERN_ERR, "can't mount with " +- "both data=journal and dax"); +- err = -EINVAL; +- goto restore_opts; +- } + } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { + if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { + ext4_msg(sb, KERN_ERR, "can't mount with " +@@ -5387,12 +5392,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) + goto restore_opts; + } + +- if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { +- ext4_msg(sb, KERN_WARNING, "warning: refusing change of " +- "dax flag with busy inodes while remounting"); +- sbi->s_mount_opt ^= EXT4_MOUNT_DAX; +- } +- + if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) + ext4_abort(sb, "Abort forced by user"); + +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c +index a28ffecc0f95..bbd07fe8a492 100644 +--- a/fs/f2fs/checkpoint.c ++++ b/fs/f2fs/checkpoint.c +@@ -892,8 +892,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) + int i; + int err; + +- sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), +- GFP_KERNEL); ++ sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), ++ GFP_KERNEL); + if (!sbi->ckpt) + return -ENOMEM; + /* +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c +index 84280ad3786c..e9af46dc06f7 100644 +--- a/fs/f2fs/dir.c ++++ b/fs/f2fs/dir.c +@@ -107,36 +107,28 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, + /* + * Test whether a case-insensitive directory entry matches the filename + * being searched for. +- * +- * Returns: 0 if the directory entry matches, more than 0 if it +- * doesn't match or less than zero on error. + */ +-int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, +- const struct qstr *entry, bool quick) ++static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, ++ const struct qstr *entry, bool quick) + { +- const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb); ++ const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + const struct unicode_map *um = sbi->s_encoding; +- int ret; ++ int res; + + if (quick) +- ret = utf8_strncasecmp_folded(um, name, entry); ++ res = utf8_strncasecmp_folded(um, name, entry); + else +- ret = utf8_strncasecmp(um, name, entry); +- +- if (ret < 0) { +- /* Handle invalid character sequence as either an error +- * or as an opaque byte sequence. ++ res = utf8_strncasecmp(um, name, entry); ++ if (res < 0) { ++ /* ++ * In strict mode, ignore invalid names. In non-strict mode, ++ * fall back to treating them as opaque byte sequences. 
+ */ +- if (f2fs_has_strict_mode(sbi)) +- return -EINVAL; +- +- if (name->len != entry->len) +- return 1; +- +- return !!memcmp(name->name, entry->name, name->len); ++ if (f2fs_has_strict_mode(sbi) || name->len != entry->len) ++ return false; ++ return !memcmp(name->name, entry->name, name->len); + } +- +- return ret; ++ return res == 0; + } + + static void f2fs_fname_setup_ci_filename(struct inode *dir, +@@ -188,10 +180,10 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, + if (cf_str->name) { + struct qstr cf = {.name = cf_str->name, + .len = cf_str->len}; +- return !f2fs_ci_compare(parent, &cf, &entry, true); ++ return f2fs_match_ci_name(parent, &cf, &entry, true); + } +- return !f2fs_ci_compare(parent, fname->usr_fname, &entry, +- false); ++ return f2fs_match_ci_name(parent, fname->usr_fname, &entry, ++ false); + } + #endif + if (fscrypt_match_name(fname, d->filename[bit_pos], +@@ -1067,17 +1059,41 @@ const struct file_operations f2fs_dir_operations = { + static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, + const char *str, const struct qstr *name) + { +- struct qstr qstr = {.name = str, .len = len }; + const struct dentry *parent = READ_ONCE(dentry->d_parent); +- const struct inode *inode = READ_ONCE(parent->d_inode); ++ const struct inode *dir = READ_ONCE(parent->d_inode); ++ const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); ++ struct qstr entry = QSTR_INIT(str, len); ++ char strbuf[DNAME_INLINE_LEN]; ++ int res; ++ ++ if (!dir || !IS_CASEFOLDED(dir)) ++ goto fallback; + +- if (!inode || !IS_CASEFOLDED(inode)) { +- if (len != name->len) +- return -1; +- return memcmp(str, name->name, len); ++ /* ++ * If the dentry name is stored in-line, then it may be concurrently ++ * modified by a rename. If this happens, the VFS will eventually retry ++ * the lookup, so it doesn't matter what ->d_compare() returns. ++ * However, it's unsafe to call utf8_strncasecmp() with an unstable ++ * string. Therefore, we have to copy the name into a temporary buffer. 
++ */ ++ if (len <= DNAME_INLINE_LEN - 1) { ++ memcpy(strbuf, str, len); ++ strbuf[len] = 0; ++ entry.name = strbuf; ++ /* prevent compiler from optimizing out the temporary buffer */ ++ barrier(); + } + +- return f2fs_ci_compare(inode, name, &qstr, false); ++ res = utf8_strncasecmp(sbi->s_encoding, name, &entry); ++ if (res >= 0) ++ return res; ++ ++ if (f2fs_has_strict_mode(sbi)) ++ return -EINVAL; ++fallback: ++ if (len != name->len) ++ return 1; ++ return !!memcmp(str, name->name, len); + } + + static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index a26ea1e6ba88..03693d6b1c10 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -2790,18 +2790,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) + static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, + size_t size, gfp_t flags) + { +- void *ret; +- + if (time_to_inject(sbi, FAULT_KMALLOC)) { + f2fs_show_injection_info(FAULT_KMALLOC); + return NULL; + } + +- ret = kmalloc(size, flags); +- if (ret) +- return ret; +- +- return kvmalloc(size, flags); ++ return kmalloc(size, flags); + } + + static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, +@@ -2960,11 +2954,6 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, + bool hot, bool set); + struct dentry *f2fs_get_parent(struct dentry *child); + +-extern int f2fs_ci_compare(const struct inode *parent, +- const struct qstr *name, +- const struct qstr *entry, +- bool quick); +- + /* + * dir.c + */ +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index c3a9da79ac99..5d94abe467a4 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -2056,8 +2056,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) + + if (in != F2FS_GOING_DOWN_FULLSYNC) { + ret = mnt_want_write_file(filp); +- if (ret) ++ if (ret) { ++ if (ret == -EROFS) { ++ ret = 0; ++ f2fs_stop_checkpoint(sbi, false); ++ set_sbi_flag(sbi, SBI_IS_SHUTDOWN); ++ trace_f2fs_shutdown(sbi, in, ret); ++ } + return ret; ++ } + } + + switch (in) { +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index f14401a77d60..90a20bd12961 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -2933,7 +2933,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) + return 0; + + nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); +- nm_i->nat_bits = f2fs_kzalloc(sbi, ++ nm_i->nat_bits = f2fs_kvzalloc(sbi, + nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); + if (!nm_i->nat_bits) + return -ENOMEM; +@@ -3066,9 +3066,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) + int i; + + nm_i->free_nid_bitmap = +- f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), +- nm_i->nat_blocks), +- GFP_KERNEL); ++ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), ++ nm_i->nat_blocks), ++ GFP_KERNEL); + if (!nm_i->free_nid_bitmap) + return -ENOMEM; + +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index e36543c9f2b7..f4b882ee48dd 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1230,7 +1230,8 @@ static int f2fs_statfs_project(struct super_block *sb, + limit >>= sb->s_blocksize_bits; + + if (limit && buf->f_blocks > limit) { +- curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; ++ curblock = (dquot->dq_dqb.dqb_curspace + ++ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; + buf->f_blocks = limit; + buf->f_bfree = buf->f_bavail = + (buf->f_blocks > curblock) ? 
+@@ -2900,7 +2901,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) + if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) + FDEV(devi).nr_blkz++; + +- FDEV(devi).blkz_seq = f2fs_kzalloc(sbi, ++ FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, + BITS_TO_LONGS(FDEV(devi).nr_blkz) + * sizeof(unsigned long), + GFP_KERNEL); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 3dd37a998ea9..66214707a945 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -712,6 +712,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc, + spin_unlock(&io->lock); + + ia->ap.args.end = fuse_aio_complete_req; ++ ia->ap.args.may_block = io->should_dirty; + err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL); + if (err) + fuse_aio_complete_req(fc, &ia->ap.args, err); +@@ -3279,13 +3280,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, + if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) + return -EXDEV; + +- if (fc->writeback_cache) { +- inode_lock(inode_in); +- err = fuse_writeback_range(inode_in, pos_in, pos_in + len); +- inode_unlock(inode_in); +- if (err) +- return err; +- } ++ inode_lock(inode_in); ++ err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); ++ inode_unlock(inode_in); ++ if (err) ++ return err; + + inode_lock(inode_out); + +@@ -3293,11 +3292,27 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, + if (err) + goto out; + +- if (fc->writeback_cache) { +- err = fuse_writeback_range(inode_out, pos_out, pos_out + len); +- if (err) +- goto out; +- } ++ /* ++ * Write out dirty pages in the destination file before sending the COPY ++ * request to userspace. After the request is completed, truncate off ++ * pages (including partial ones) from the cache that have been copied, ++ * since these contain stale data at that point. ++ * ++ * This should be mostly correct, but if the COPY writes to partial ++ * pages (at the start or end) and the parts not covered by the COPY are ++ * written through a memory map after calling fuse_writeback_range(), ++ * then these partial page modifications will be lost on truncation. ++ * ++ * It is unlikely that someone would rely on such mixed style ++ * modifications. Yet this does give less guarantees than if the ++ * copying was performed with write(2). ++ * ++ * To fix this a i_mmap_sem style lock could be used to prevent new ++ * faults while the copy is ongoing. 
++ */ ++ err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); ++ if (err) ++ goto out; + + if (is_unstable) + set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); +@@ -3318,6 +3333,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, + if (err) + goto out; + ++ truncate_inode_pages_range(inode_out->i_mapping, ++ ALIGN_DOWN(pos_out, PAGE_SIZE), ++ ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); ++ + if (fc->writeback_cache) { + fuse_write_update_size(inode_out, pos_out + outarg.size); + file_update_time(file_out); +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index ca344bf71404..d7cde216fc87 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -249,6 +249,7 @@ struct fuse_args { + bool out_argvar:1; + bool page_zeroing:1; + bool page_replace:1; ++ bool may_block:1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error); +diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c +index a5c86048b96e..7505f8102762 100644 +--- a/fs/fuse/virtio_fs.c ++++ b/fs/fuse/virtio_fs.c +@@ -55,6 +55,12 @@ struct virtio_fs_forget { + struct list_head list; + }; + ++struct virtio_fs_req_work { ++ struct fuse_req *req; ++ struct virtio_fs_vq *fsvq; ++ struct work_struct done_work; ++}; ++ + static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, + struct fuse_req *req, bool in_flight); + +@@ -443,19 +449,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req) + } + + /* Work function for request completion */ ++static void virtio_fs_request_complete(struct fuse_req *req, ++ struct virtio_fs_vq *fsvq) ++{ ++ struct fuse_pqueue *fpq = &fsvq->fud->pq; ++ struct fuse_conn *fc = fsvq->fud->fc; ++ struct fuse_args *args; ++ struct fuse_args_pages *ap; ++ unsigned int len, i, thislen; ++ struct page *page; ++ ++ /* ++ * TODO verify that server properly follows FUSE protocol ++ * (oh.uniq, oh.len) ++ */ ++ args = req->args; ++ copy_args_from_argbuf(args, req); ++ ++ if (args->out_pages && args->page_zeroing) { ++ len = args->out_args[args->out_numargs - 1].size; ++ ap = container_of(args, typeof(*ap), args); ++ for (i = 0; i < ap->num_pages; i++) { ++ thislen = ap->descs[i].length; ++ if (len < thislen) { ++ WARN_ON(ap->descs[i].offset); ++ page = ap->pages[i]; ++ zero_user_segment(page, len, thislen); ++ len = 0; ++ } else { ++ len -= thislen; ++ } ++ } ++ } ++ ++ spin_lock(&fpq->lock); ++ clear_bit(FR_SENT, &req->flags); ++ spin_unlock(&fpq->lock); ++ ++ fuse_request_end(fc, req); ++ spin_lock(&fsvq->lock); ++ dec_in_flight_req(fsvq); ++ spin_unlock(&fsvq->lock); ++} ++ ++static void virtio_fs_complete_req_work(struct work_struct *work) ++{ ++ struct virtio_fs_req_work *w = ++ container_of(work, typeof(*w), done_work); ++ ++ virtio_fs_request_complete(w->req, w->fsvq); ++ kfree(w); ++} ++ + static void virtio_fs_requests_done_work(struct work_struct *work) + { + struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, + done_work); + struct fuse_pqueue *fpq = &fsvq->fud->pq; +- struct fuse_conn *fc = fsvq->fud->fc; + struct virtqueue *vq = fsvq->vq; + struct fuse_req *req; +- struct fuse_args_pages *ap; + struct fuse_req *next; +- struct fuse_args *args; +- unsigned int len, i, thislen; +- struct page *page; ++ unsigned int len; + LIST_HEAD(reqs); + + /* Collect completed requests off the virtqueue */ +@@ -473,38 +527,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work) + + /* End requests */ + list_for_each_entry_safe(req, next, 
&reqs, list) { +- /* +- * TODO verify that server properly follows FUSE protocol +- * (oh.uniq, oh.len) +- */ +- args = req->args; +- copy_args_from_argbuf(args, req); +- +- if (args->out_pages && args->page_zeroing) { +- len = args->out_args[args->out_numargs - 1].size; +- ap = container_of(args, typeof(*ap), args); +- for (i = 0; i < ap->num_pages; i++) { +- thislen = ap->descs[i].length; +- if (len < thislen) { +- WARN_ON(ap->descs[i].offset); +- page = ap->pages[i]; +- zero_user_segment(page, len, thislen); +- len = 0; +- } else { +- len -= thislen; +- } +- } +- } +- +- spin_lock(&fpq->lock); +- clear_bit(FR_SENT, &req->flags); + list_del_init(&req->list); +- spin_unlock(&fpq->lock); + +- fuse_request_end(fc, req); +- spin_lock(&fsvq->lock); +- dec_in_flight_req(fsvq); +- spin_unlock(&fsvq->lock); ++ /* blocking async request completes in a worker context */ ++ if (req->args->may_block) { ++ struct virtio_fs_req_work *w; ++ ++ w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL); ++ INIT_WORK(&w->done_work, virtio_fs_complete_req_work); ++ w->fsvq = fsvq; ++ w->req = req; ++ schedule_work(&w->done_work); ++ } else { ++ virtio_fs_request_complete(req, fsvq); ++ } + } + } + +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c +index 110e5c4db819..a4b6a49462a4 100644 +--- a/fs/gfs2/log.c ++++ b/fs/gfs2/log.c +@@ -881,8 +881,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) + * @new: New transaction to be merged + */ + +-static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) ++static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new) + { ++ struct gfs2_trans *old = sdp->sd_log_tr; ++ + WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); + + old->tr_num_buf_new += new->tr_num_buf_new; +@@ -893,6 +895,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) + + list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); + list_splice_tail_init(&new->tr_buf, &old->tr_buf); ++ ++ spin_lock(&sdp->sd_ail_lock); ++ list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list); ++ list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list); ++ spin_unlock(&sdp->sd_ail_lock); + } + + static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) +@@ -904,7 +911,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) + gfs2_log_lock(sdp); + + if (sdp->sd_log_tr) { +- gfs2_merge_trans(sdp->sd_log_tr, tr); ++ gfs2_merge_trans(sdp, tr); + } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { + gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); + sdp->sd_log_tr = tr; +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index 18daf494abab..c26c864590cc 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -911,7 +911,7 @@ fail: + } + + static const match_table_t nolock_tokens = { +- { Opt_jid, "jid=%d\n", }, ++ { Opt_jid, "jid=%d", }, + { Opt_err, NULL }, + }; + +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index c1ce2805c563..fa58835668a6 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -96,7 +96,6 @@ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); + EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); + EXPORT_SYMBOL(jbd2_inode_cache); + +-static void __journal_abort_soft (journal_t *journal, int errno); + static int jbd2_journal_create_slab(size_t slab_size); + + #ifdef CONFIG_JBD2_DEBUG +@@ -805,7 +804,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, + "at offset %lu on %s\n", + __func__, blocknr, journal->j_devname); + 
err = -EIO; +- __journal_abort_soft(journal, err); ++ jbd2_journal_abort(journal, err); + } + } else { + *retp = blocknr; /* +journal->j_blk_offset */ +@@ -2070,64 +2069,6 @@ int jbd2_journal_wipe(journal_t *journal, int write) + return err; + } + +-/* +- * Journal abort has very specific semantics, which we describe +- * for journal abort. +- * +- * Two internal functions, which provide abort to the jbd layer +- * itself are here. +- */ +- +-/* +- * Quick version for internal journal use (doesn't lock the journal). +- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, +- * and don't attempt to make any other journal updates. +- */ +-void __jbd2_journal_abort_hard(journal_t *journal) +-{ +- transaction_t *transaction; +- +- if (journal->j_flags & JBD2_ABORT) +- return; +- +- printk(KERN_ERR "Aborting journal on device %s.\n", +- journal->j_devname); +- +- write_lock(&journal->j_state_lock); +- journal->j_flags |= JBD2_ABORT; +- transaction = journal->j_running_transaction; +- if (transaction) +- __jbd2_log_start_commit(journal, transaction->t_tid); +- write_unlock(&journal->j_state_lock); +-} +- +-/* Soft abort: record the abort error status in the journal superblock, +- * but don't do any other IO. */ +-static void __journal_abort_soft (journal_t *journal, int errno) +-{ +- int old_errno; +- +- write_lock(&journal->j_state_lock); +- old_errno = journal->j_errno; +- if (!journal->j_errno || errno == -ESHUTDOWN) +- journal->j_errno = errno; +- +- if (journal->j_flags & JBD2_ABORT) { +- write_unlock(&journal->j_state_lock); +- if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) +- jbd2_journal_update_sb_errno(journal); +- return; +- } +- write_unlock(&journal->j_state_lock); +- +- __jbd2_journal_abort_hard(journal); +- +- jbd2_journal_update_sb_errno(journal); +- write_lock(&journal->j_state_lock); +- journal->j_flags |= JBD2_REC_ERR; +- write_unlock(&journal->j_state_lock); +-} +- + /** + * void jbd2_journal_abort () - Shutdown the journal immediately. + * @journal: the journal to shutdown. +@@ -2171,7 +2112,47 @@ static void __journal_abort_soft (journal_t *journal, int errno) + + void jbd2_journal_abort(journal_t *journal, int errno) + { +- __journal_abort_soft(journal, errno); ++ transaction_t *transaction; ++ ++ /* ++ * ESHUTDOWN always takes precedence because a file system check ++ * caused by any other journal abort error is not required after ++ * a shutdown triggered. ++ */ ++ write_lock(&journal->j_state_lock); ++ if (journal->j_flags & JBD2_ABORT) { ++ int old_errno = journal->j_errno; ++ ++ write_unlock(&journal->j_state_lock); ++ if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) { ++ journal->j_errno = errno; ++ jbd2_journal_update_sb_errno(journal); ++ } ++ return; ++ } ++ ++ /* ++ * Mark the abort as occurred and start current running transaction ++ * to release all journaled buffer. ++ */ ++ pr_err("Aborting journal on device %s.\n", journal->j_devname); ++ ++ journal->j_flags |= JBD2_ABORT; ++ journal->j_errno = errno; ++ transaction = journal->j_running_transaction; ++ if (transaction) ++ __jbd2_log_start_commit(journal, transaction->t_tid); ++ write_unlock(&journal->j_state_lock); ++ ++ /* ++ * Record errno to the journal super block, so that fsck and jbd2 ++ * layer could realise that a filesystem check is needed. 
++ */ ++ jbd2_journal_update_sb_errno(journal); ++ ++ write_lock(&journal->j_state_lock); ++ journal->j_flags |= JBD2_REC_ERR; ++ write_unlock(&journal->j_state_lock); + } + + /** +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 3802c88e8372..6de41f741280 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -826,6 +826,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat, + do_update |= cache_validity & NFS_INO_INVALID_ATIME; + if (request_mask & (STATX_CTIME|STATX_MTIME)) + do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE; ++ if (request_mask & STATX_BLOCKS) ++ do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; + if (do_update) { + /* Update the attribute cache */ + if (!(server->flags & NFS_MOUNT_NOAC)) +@@ -1750,7 +1752,8 @@ out_noforce: + status = nfs_post_op_update_inode_locked(inode, fattr, + NFS_INO_INVALID_CHANGE + | NFS_INO_INVALID_CTIME +- | NFS_INO_INVALID_MTIME); ++ | NFS_INO_INVALID_MTIME ++ | NFS_INO_INVALID_BLOCKS); + return status; + } + +@@ -1857,7 +1860,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) + nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR + | NFS_INO_INVALID_ATIME + | NFS_INO_REVAL_FORCED +- | NFS_INO_REVAL_PAGECACHE); ++ | NFS_INO_REVAL_PAGECACHE ++ | NFS_INO_INVALID_BLOCKS); + + /* Do atomic weak cache consistency updates */ + nfs_wcc_update_inode(inode, fattr); +@@ -2019,8 +2023,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) + inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); + } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) + inode->i_blocks = fattr->du.nfs2.blocks; +- else ++ else { ++ nfsi->cache_validity |= save_cache_validity & ++ (NFS_INO_INVALID_BLOCKS ++ | NFS_INO_REVAL_FORCED); + cache_revalidated = false; ++ } + + /* Update attrtimeo value if we're out of the unstable period */ + if (attr_changed) { +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index e257653f25ab..33c17c69aeaa 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -7870,7 +7870,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) + } + + static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { +- .rpc_call_done = &nfs4_bind_one_conn_to_session_done, ++ .rpc_call_done = nfs4_bind_one_conn_to_session_done, + }; + + /* +diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h +index 10ec5ecdf117..65c331f75e9c 100644 +--- a/fs/nfsd/cache.h ++++ b/fs/nfsd/cache.h +@@ -78,6 +78,8 @@ enum { + /* Checksum this amount of the request */ + #define RC_CSUMLEN (256U) + ++int nfsd_drc_slab_create(void); ++void nfsd_drc_slab_free(void); + int nfsd_reply_cache_init(struct nfsd_net *); + void nfsd_reply_cache_shutdown(struct nfsd_net *); + int nfsd_cache_lookup(struct svc_rqst *); +diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h +index 9a4ef815fb8c..ed53e206a299 100644 +--- a/fs/nfsd/netns.h ++++ b/fs/nfsd/netns.h +@@ -139,7 +139,6 @@ struct nfsd_net { + * Duplicate reply cache + */ + struct nfsd_drc_bucket *drc_hashtbl; +- struct kmem_cache *drc_slab; + + /* max number of entries allowed in the cache */ + unsigned int max_drc_entries; +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c +index afca3287184b..efe55d101b0e 100644 +--- a/fs/nfsd/nfs4callback.c ++++ b/fs/nfsd/nfs4callback.c +@@ -1230,6 +1230,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) + err = setup_callback_client(clp, &conn, ses); + if (err) { + nfsd4_mark_cb_down(clp, err); ++ if (c) ++ svc_xprt_put(c->cn_xprt); + return; + } + } +diff --git 
a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c +index 96352ab7bd81..4a258065188e 100644 +--- a/fs/nfsd/nfscache.c ++++ b/fs/nfsd/nfscache.c +@@ -36,6 +36,8 @@ struct nfsd_drc_bucket { + spinlock_t cache_lock; + }; + ++static struct kmem_cache *drc_slab; ++ + static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); + static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, + struct shrink_control *sc); +@@ -95,7 +97,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, + { + struct svc_cacherep *rp; + +- rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL); ++ rp = kmem_cache_alloc(drc_slab, GFP_KERNEL); + if (rp) { + rp->c_state = RC_UNUSED; + rp->c_type = RC_NOCACHE; +@@ -129,7 +131,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, + atomic_dec(&nn->num_drc_entries); + nn->drc_mem_usage -= sizeof(*rp); + } +- kmem_cache_free(nn->drc_slab, rp); ++ kmem_cache_free(drc_slab, rp); + } + + static void +@@ -141,6 +143,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, + spin_unlock(&b->cache_lock); + } + ++int nfsd_drc_slab_create(void) ++{ ++ drc_slab = kmem_cache_create("nfsd_drc", ++ sizeof(struct svc_cacherep), 0, 0, NULL); ++ return drc_slab ? 0: -ENOMEM; ++} ++ ++void nfsd_drc_slab_free(void) ++{ ++ kmem_cache_destroy(drc_slab); ++} ++ + int nfsd_reply_cache_init(struct nfsd_net *nn) + { + unsigned int hashsize; +@@ -159,18 +173,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) + if (status) + goto out_nomem; + +- nn->drc_slab = kmem_cache_create("nfsd_drc", +- sizeof(struct svc_cacherep), 0, 0, NULL); +- if (!nn->drc_slab) +- goto out_shrinker; +- + nn->drc_hashtbl = kcalloc(hashsize, + sizeof(*nn->drc_hashtbl), GFP_KERNEL); + if (!nn->drc_hashtbl) { + nn->drc_hashtbl = vzalloc(array_size(hashsize, + sizeof(*nn->drc_hashtbl))); + if (!nn->drc_hashtbl) +- goto out_slab; ++ goto out_shrinker; + } + + for (i = 0; i < hashsize; i++) { +@@ -180,8 +189,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) + nn->drc_hashsize = hashsize; + + return 0; +-out_slab: +- kmem_cache_destroy(nn->drc_slab); + out_shrinker: + unregister_shrinker(&nn->nfsd_reply_cache_shrinker); + out_nomem: +@@ -209,8 +216,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn) + nn->drc_hashtbl = NULL; + nn->drc_hashsize = 0; + +- kmem_cache_destroy(nn->drc_slab); +- nn->drc_slab = NULL; + } + + /* +@@ -464,8 +469,7 @@ found_entry: + rtn = RC_REPLY; + break; + default: +- printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); +- nfsd_reply_cache_free_locked(b, rp, nn); ++ WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); + } + + goto out; +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c +index d77c5261c03c..159feae6af8b 100644 +--- a/fs/nfsd/nfsctl.c ++++ b/fs/nfsd/nfsctl.c +@@ -1534,6 +1534,9 @@ static int __init init_nfsd(void) + goto out_free_slabs; + nfsd_fault_inject_init(); /* nfsd fault injection controls */ + nfsd_stat_init(); /* Statistics */ ++ retval = nfsd_drc_slab_create(); ++ if (retval) ++ goto out_free_stat; + nfsd_lockd_init(); /* lockd->nfsd callbacks */ + retval = create_proc_exports_entry(); + if (retval) +@@ -1547,6 +1550,8 @@ out_free_all: + remove_proc_entry("fs/nfs", NULL); + out_free_lockd: + nfsd_lockd_shutdown(); ++ nfsd_drc_slab_free(); ++out_free_stat: + nfsd_stat_shutdown(); + nfsd_fault_inject_cleanup(); + nfsd4_exit_pnfs(); +@@ -1561,6 +1566,7 @@ out_unregister_pernet: + + static void __exit exit_nfsd(void) + { ++ nfsd_drc_slab_free(); + remove_proc_entry("fs/nfs/exports", 
NULL); + remove_proc_entry("fs/nfs", NULL); + nfsd_stat_shutdown(); +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index c94a9ff9f082..4f0e62cbf2ff 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -57,7 +57,7 @@ static inline int get_bitmask_order(unsigned int count) + + static __always_inline unsigned long hweight_long(unsigned long w) + { +- return sizeof(w) == 4 ? hweight32(w) : hweight64(w); ++ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); + } + + /** +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 8b5330dd5ac0..62a2ec9f17df 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -750,9 +750,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) + static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) + { + #if BITS_PER_LONG==32 && defined(CONFIG_SMP) ++ preempt_disable(); + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); ++ preempt_enable(); + #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; +diff --git a/include/linux/ioport.h b/include/linux/ioport.h +index 7bddddfc76d6..fdc201d61460 100644 +--- a/include/linux/ioport.h ++++ b/include/linux/ioport.h +@@ -300,5 +300,11 @@ struct resource *devm_request_free_mem_region(struct device *dev, + struct resource *request_free_mem_region(struct resource *base, + unsigned long size, const char *name); + ++#ifdef CONFIG_IO_STRICT_DEVMEM ++void revoke_devmem(struct resource *res); ++#else ++static inline void revoke_devmem(struct resource *res) { }; ++#endif ++ + #endif /* __ASSEMBLY__ */ + #endif /* _LINUX_IOPORT_H */ +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h +index 10e6049c0ba9..b0e97e5de8ca 100644 +--- a/include/linux/jbd2.h ++++ b/include/linux/jbd2.h +@@ -1402,7 +1402,6 @@ extern int jbd2_journal_skip_recovery (journal_t *); + extern void jbd2_journal_update_sb_errno(journal_t *); + extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, + unsigned long, int); +-extern void __jbd2_journal_abort_hard (journal_t *); + extern void jbd2_journal_abort (journal_t *, int); + extern int jbd2_journal_errno (journal_t *); + extern void jbd2_journal_ack_err (journal_t *); +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index 04bdaf01112c..645fd401c856 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) + return this_cpu_ptr(&kprobe_ctlblk); + } + ++extern struct kprobe kprobe_busy; ++void kprobe_busy_begin(void); ++void kprobe_busy_end(void); ++ + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); + int register_kprobe(struct kprobe *p); + void unregister_kprobe(struct kprobe *p); +diff --git a/include/linux/libata.h b/include/linux/libata.h +index c44e4cfbcb16..b9970f5bab67 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + /* + * Define if arch has non-standard setup. 
This is a _PCI_ standard +@@ -870,6 +871,8 @@ struct ata_port { + struct timer_list fastdrain_timer; + unsigned long fastdrain_cnt; + ++ async_cookie_t cookie; ++ + int em_message_type; + void *private_data; + +diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h +index 3c67983678ec..744dce63946e 100644 +--- a/include/linux/mfd/stmfx.h ++++ b/include/linux/mfd/stmfx.h +@@ -109,6 +109,7 @@ struct stmfx { + struct device *dev; + struct regmap *map; + struct regulator *vdd; ++ int irq; + struct irq_domain *irq_domain; + struct mutex lock; /* IRQ bus lock */ + u8 irq_src; +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index 570a60c2f4f4..ad09c0cc5464 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -225,6 +225,7 @@ struct nfs4_copy_state { + #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ + #define NFS_INO_DATA_INVAL_DEFER \ + BIT(13) /* Deferred cache invalidation */ ++#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ + + #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ + | NFS_INO_INVALID_CTIME \ +diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h +index 8675e145ea8b..2040696d75b6 100644 +--- a/include/linux/usb/composite.h ++++ b/include/linux/usb/composite.h +@@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *); + + int usb_interface_id(struct usb_configuration *, struct usb_function *); + ++int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, ++ struct usb_ep *_ep, u8 alt); ++ + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep); + +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h +index a15ce99dfc2d..78e006355557 100644 +--- a/include/linux/usb/ehci_def.h ++++ b/include/linux/usb/ehci_def.h +@@ -151,7 +151,7 @@ struct ehci_regs { + #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ + #define PORT_POWER (1<<12) /* true: has power (see PPC) */ + #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ +-/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ ++#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J */ + /* 9 reserved */ + #define PORT_LPM (1<<9) /* LPM transaction */ + #define PORT_RESET (1<<8) /* reset port */ +diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h +index 124462d65eac..67f5adc9b875 100644 +--- a/include/linux/usb/gadget.h ++++ b/include/linux/usb/gadget.h +@@ -373,6 +373,7 @@ struct usb_gadget_ops { + * @connected: True if gadget is connected. + * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag + * indicates that it supports LPM as per the LPM ECN & errata. ++ * @irq: the interrupt number for device controller. + * + * Gadgets have a mostly-portable "gadget driver" implementing device + * functions, handling all usb configurations and interfaces. 
Gadget +@@ -427,6 +428,7 @@ struct usb_gadget { + unsigned deactivated:1; + unsigned connected:1; + unsigned lpm_capable:1; ++ int irq; + }; + #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) + +diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h +index 903cc2d2750b..84ae605c0643 100644 +--- a/include/uapi/linux/magic.h ++++ b/include/uapi/linux/magic.h +@@ -93,6 +93,7 @@ + #define BALLOON_KVM_MAGIC 0x13661366 + #define ZSMALLOC_MAGIC 0x58295829 + #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ ++#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ + #define Z3FOLD_MAGIC 0x33 + + #endif /* __LINUX_MAGIC_H__ */ +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 2625c241ac00..195ecb955fcc 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -586,11 +586,12 @@ static void kprobe_optimizer(struct work_struct *work) + mutex_unlock(&module_mutex); + mutex_unlock(&text_mutex); + cpus_read_unlock(); +- mutex_unlock(&kprobe_mutex); + + /* Step 5: Kick optimizer again if needed */ + if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) + kick_kprobe_optimizer(); ++ ++ mutex_unlock(&kprobe_mutex); + } + + /* Wait for completing optimization and unoptimization */ +@@ -1236,6 +1237,26 @@ __releases(hlist_lock) + } + NOKPROBE_SYMBOL(kretprobe_table_unlock); + ++struct kprobe kprobe_busy = { ++ .addr = (void *) get_kprobe, ++}; ++ ++void kprobe_busy_begin(void) ++{ ++ struct kprobe_ctlblk *kcb; ++ ++ preempt_disable(); ++ __this_cpu_write(current_kprobe, &kprobe_busy); ++ kcb = get_kprobe_ctlblk(); ++ kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++} ++ ++void kprobe_busy_end(void) ++{ ++ __this_cpu_write(current_kprobe, NULL); ++ preempt_enable(); ++} ++ + /* + * This function is called from finish_task_switch when task tk becomes dead, + * so that we can recycle any function-return probe instances associated +@@ -1253,6 +1274,8 @@ void kprobe_flush_task(struct task_struct *tk) + /* Early boot. kretprobe_table_locks not yet initialized. 
*/ + return; + ++ kprobe_busy_begin(); ++ + INIT_HLIST_HEAD(&empty_rp); + hash = hash_ptr(tk, KPROBE_HASH_BITS); + head = &kretprobe_inst_table[hash]; +@@ -1266,6 +1289,8 @@ void kprobe_flush_task(struct task_struct *tk) + hlist_del(&ri->hlist); + kfree(ri); + } ++ ++ kprobe_busy_end(); + } + NOKPROBE_SYMBOL(kprobe_flush_task); + +diff --git a/kernel/resource.c b/kernel/resource.c +index 76036a41143b..841737bbda9e 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -1126,6 +1126,7 @@ struct resource * __request_region(struct resource *parent, + { + DECLARE_WAITQUEUE(wait, current); + struct resource *res = alloc_resource(GFP_KERNEL); ++ struct resource *orig_parent = parent; + + if (!res) + return NULL; +@@ -1176,6 +1177,10 @@ struct resource * __request_region(struct resource *parent, + break; + } + write_unlock(&resource_lock); ++ ++ if (res && orig_parent == &iomem_resource) ++ revoke_devmem(res); ++ + return res; + } + EXPORT_SYMBOL(__request_region); +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index e7e483cdbea6..a677aa84ccb6 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -999,8 +999,10 @@ static void blk_add_trace_split(void *ignore, + + __blk_add_trace(bt, bio->bi_iter.bi_sector, + bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, +- BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), +- &rpdu, blk_trace_bio_get_cgid(q, bio)); ++ BLK_TA_SPLIT, ++ blk_status_to_errno(bio->bi_status), ++ sizeof(rpdu), &rpdu, ++ blk_trace_bio_get_cgid(q, bio)); + } + rcu_read_unlock(); + } +@@ -1037,7 +1039,8 @@ static void blk_add_trace_bio_remap(void *ignore, + r.sector_from = cpu_to_be64(from); + + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, +- bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, ++ bio_op(bio), bio->bi_opf, BLK_TA_REMAP, ++ blk_status_to_errno(bio->bi_status), + sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); + } +@@ -1259,21 +1262,10 @@ static inline __u16 t_error(const struct trace_entry *ent) + + static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) + { +- const __u64 *val = pdu_start(ent, has_cg); ++ const __be64 *val = pdu_start(ent, has_cg); + return be64_to_cpu(*val); + } + +-static void get_pdu_remap(const struct trace_entry *ent, +- struct blk_io_trace_remap *r, bool has_cg) +-{ +- const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); +- __u64 sector_from = __r->sector_from; +- +- r->device_from = be32_to_cpu(__r->device_from); +- r->device_to = be32_to_cpu(__r->device_to); +- r->sector_from = be64_to_cpu(sector_from); +-} +- + typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, + bool has_cg); + +@@ -1399,13 +1391,13 @@ static void blk_log_with_error(struct trace_seq *s, + + static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) + { +- struct blk_io_trace_remap r = { .device_from = 0, }; ++ const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); + +- get_pdu_remap(ent, &r, has_cg); + trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", + t_sector(ent), t_sec(ent), +- MAJOR(r.device_from), MINOR(r.device_from), +- (unsigned long long)r.sector_from); ++ MAJOR(be32_to_cpu(__r->device_from)), ++ MINOR(be32_to_cpu(__r->device_from)), ++ be64_to_cpu(__r->sector_from)); + } + + static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index fba4b48451f6..26de9c654956 100644 +--- 
a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -1464,7 +1464,7 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, + if (perf_type_tracepoint) + tk = find_trace_kprobe(pevent, group); + else +- tk = event->tp_event->data; ++ tk = trace_kprobe_primary_from_call(event->tp_event); + if (!tk) + return -EINVAL; + +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c +index ab8b6436d53f..f98d6d94cbbf 100644 +--- a/kernel/trace/trace_probe.c ++++ b/kernel/trace/trace_probe.c +@@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, + ret = -EINVAL; + goto fail; + } +- if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) || +- parg->count) { ++ if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || ++ code->op == FETCH_OP_DATA) || parg->count) { + /* + * IMM, DATA and COMM is pointing actual address, those + * must be kept, and if parg->count != 0, this is an +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c +index 2619bc5ed520..5294843de6ef 100644 +--- a/kernel/trace/trace_uprobe.c ++++ b/kernel/trace/trace_uprobe.c +@@ -1405,7 +1405,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, + if (perf_type_tracepoint) + tu = find_probe_event(pevent, group); + else +- tu = event->tp_event->data; ++ tu = trace_uprobe_primary_from_call(event->tp_event); + if (!tu) + return -EINVAL; + +diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c +index 2c13ecc5bb2c..ed1f3df27260 100644 +--- a/lib/zlib_inflate/inffast.c ++++ b/lib/zlib_inflate/inffast.c +@@ -10,17 +10,6 @@ + + #ifndef ASMINF + +-/* Allow machine dependent optimization for post-increment or pre-increment. +- Based on testing to date, +- Pre-increment preferred for: +- - PowerPC G3 (Adler) +- - MIPS R5000 (Randers-Pehrson) +- Post-increment preferred for: +- - none +- No measurable difference: +- - Pentium III (Anderson) +- - M68060 (Nikl) +- */ + union uu { + unsigned short us; + unsigned char b[2]; +@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) + return mm.us; + } + +-#ifdef POSTINC +-# define OFF 0 +-# define PUP(a) *(a)++ +-# define UP_UNALIGNED(a) get_unaligned16((a)++) +-#else +-# define OFF 1 +-# define PUP(a) *++(a) +-# define UP_UNALIGNED(a) get_unaligned16(++(a)) +-#endif +- + /* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is +@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) + + /* copy state to local variables */ + state = (struct inflate_state *)strm->state; +- in = strm->next_in - OFF; ++ in = strm->next_in; + last = in + (strm->avail_in - 5); +- out = strm->next_out - OFF; ++ out = strm->next_out; + beg = out - (start - strm->avail_out); + end = out + (strm->avail_out - 257); + #ifdef INFLATE_STRICT +@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) + input data or output space */ + do { + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = lcode[hold & lmask]; +@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) + bits -= op; + op = (unsigned)(this.op); + if (op == 0) { /* literal */ +- PUP(out) = (unsigned char)(this.val); ++ *out++ = (unsigned char)(this.val); + } + else if (op & 16) { /* length base */ + len = 
(unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + len += (unsigned)hold & ((1U << op) - 1); +@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) + bits -= op; + } + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = dcode[hold & dmask]; +@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) + dist = (unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + } +@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) + state->mode = BAD; + break; + } +- from = window - OFF; ++ from = window; + if (write == 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } +@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from end of window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); +- from = window - OFF; ++ from = window; + if (write < len) { /* some from start of window */ + op = write; + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } +@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + while (len > 2) { +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); ++ *out++ = *from++; ++ *out++ = *from++; ++ *out++ = *from++; + len -= 3; + } + if (len) { +- PUP(out) = PUP(from); ++ *out++ = *from++; + if (len > 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else { +@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) + from = out - dist; /* copy direct from output */ + /* minimum length is three */ + /* Align out addr */ +- if (!((long)(out - 1 + OFF) & 1)) { +- PUP(out) = PUP(from); ++ if (!((long)(out - 1) & 1)) { ++ *out++ = *from++; + len--; + } +- sout = (unsigned short *)(out - OFF); ++ sout = (unsigned short *)(out); + if (dist > 2) { + unsigned short *sfrom; + +- sfrom = (unsigned short *)(from - OFF); ++ sfrom = (unsigned short *)(from); + loops = len >> 1; + do + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +- PUP(sout) = PUP(sfrom); ++ *sout++ = *sfrom++; + #else +- PUP(sout) = UP_UNALIGNED(sfrom); ++ *sout++ = get_unaligned16(sfrom++); + #endif + while (--loops); +- out = (unsigned char *)sout + OFF; +- from = (unsigned char *)sfrom + OFF; ++ out = (unsigned char *)sout; ++ from = (unsigned char *)sfrom; + } else { /* dist == 1 or dist == 2 */ + unsigned short pat16; + +- pat16 = *(sout-1+OFF); ++ pat16 = *(sout-1); + if (dist == 1) { + union uu mm; + /* copy one char pattern to both bytes */ +@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) + } + loops = len >> 1; + do 
+- PUP(sout) = pat16; ++ *sout++ = pat16; + while (--loops); +- out = (unsigned char *)sout + OFF; ++ out = (unsigned char *)sout; + } + if (len & 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else if ((op & 64) == 0) { /* 2nd level distance code */ +@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) + hold &= (1U << bits) - 1; + + /* update state and return */ +- strm->next_in = in + OFF; +- strm->next_out = out + OFF; ++ strm->next_in = in; ++ strm->next_out = out; + strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); + strm->avail_out = (unsigned)(out < end ? + 257 + (end - out) : 257 - (out - end)); +diff --git a/net/core/dev.c b/net/core/dev.c +index 8552874e5aac..204d87e7c9b1 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -79,6 +79,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); + static unsigned int napi_gen_id = NR_CPUS; + static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); + +-static seqcount_t devnet_rename_seq; ++static DECLARE_RWSEM(devnet_rename_sem); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -816,33 +817,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id); + * @net: network namespace + * @name: a pointer to the buffer where the name will be stored. + * @ifindex: the ifindex of the interface to get the name from. +- * +- * The use of raw_seqcount_begin() and cond_resched() before +- * retrying is required as we want to give the writers a chance +- * to complete when CONFIG_PREEMPT is not set. + */ + int netdev_get_name(struct net *net, char *name, int ifindex) + { + struct net_device *dev; +- unsigned int seq; ++ int ret; + +-retry: +- seq = raw_seqcount_begin(&devnet_rename_seq); ++ down_read(&devnet_rename_sem); + rcu_read_lock(); ++ + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { +- rcu_read_unlock(); +- return -ENODEV; ++ ret = -ENODEV; ++ goto out; + } + + strcpy(name, dev->name); +- rcu_read_unlock(); +- if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); +- goto retry; +- } + +- return 0; ++ ret = 0; ++out: ++ rcu_read_unlock(); ++ up_read(&devnet_rename_sem); ++ return ret; + } + + /** +@@ -1115,10 +1111,10 @@ int dev_change_name(struct net_device *dev, const char *newname) + likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return 0; + } + +@@ -1126,7 +1122,7 @@ int dev_change_name(struct net_device *dev, const char *newname) + + err = dev_get_valid_name(net, dev, newname); + if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return err; + } + +@@ -1141,11 +1137,11 @@ rollback: + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return ret; + } + +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1166,7 +1162,7 @@ rollback: + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + dev->name_assign_type = 
old_assign_type; +diff --git a/net/core/filter.c b/net/core/filter.c +index f1f2304822e3..a0a492f7cf9c 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -1766,25 +1766,27 @@ BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, + u32, offset, void *, to, u32, len, u32, start_header) + { + u8 *end = skb_tail_pointer(skb); +- u8 *net = skb_network_header(skb); +- u8 *mac = skb_mac_header(skb); +- u8 *ptr; ++ u8 *start, *ptr; + +- if (unlikely(offset > 0xffff || len > (end - mac))) ++ if (unlikely(offset > 0xffff)) + goto err_clear; + + switch (start_header) { + case BPF_HDR_START_MAC: +- ptr = mac + offset; ++ if (unlikely(!skb_mac_header_was_set(skb))) ++ goto err_clear; ++ start = skb_mac_header(skb); + break; + case BPF_HDR_START_NET: +- ptr = net + offset; ++ start = skb_network_header(skb); + break; + default: + goto err_clear; + } + +- if (likely(ptr >= mac && ptr + len <= end)) { ++ ptr = start + offset; ++ ++ if (likely(ptr + len <= end)) { + memcpy(to, ptr, len); + return 0; + } +diff --git a/net/core/sock_map.c b/net/core/sock_map.c +index 8291568b707f..6bbc118bf00e 100644 +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -837,11 +837,15 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) + err = -EINVAL; + goto free_htab; + } ++ err = bpf_map_charge_init(&htab->map.memory, cost); ++ if (err) ++ goto free_htab; + + htab->buckets = bpf_map_area_alloc(htab->buckets_num * + sizeof(struct bpf_htab_bucket), + htab->map.numa_node); + if (!htab->buckets) { ++ bpf_map_charge_finish(&htab->map.memory); + err = -ENOMEM; + goto free_htab; + } +@@ -861,6 +865,7 @@ static void sock_hash_free(struct bpf_map *map) + { + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_htab_bucket *bucket; ++ struct hlist_head unlink_list; + struct bpf_htab_elem *elem; + struct hlist_node *node; + int i; +@@ -872,13 +877,32 @@ static void sock_hash_free(struct bpf_map *map) + synchronize_rcu(); + for (i = 0; i < htab->buckets_num; i++) { + bucket = sock_hash_select_bucket(htab, i); +- hlist_for_each_entry_safe(elem, node, &bucket->head, node) { +- hlist_del_rcu(&elem->node); ++ ++ /* We are racing with sock_hash_delete_from_link to ++ * enter the spin-lock critical section. Every socket on ++ * the list is still linked to sockhash. Since link ++ * exists, psock exists and holds a ref to socket. That ++ * lets us to grab a socket ref too. ++ */ ++ raw_spin_lock_bh(&bucket->lock); ++ hlist_for_each_entry(elem, &bucket->head, node) ++ sock_hold(elem->sk); ++ hlist_move_list(&bucket->head, &unlink_list); ++ raw_spin_unlock_bh(&bucket->lock); ++ ++ /* Process removed entries out of atomic context to ++ * block for socket lock before deleting the psock's ++ * link to sockhash. 
++ */ ++ hlist_for_each_entry_safe(elem, node, &unlink_list, node) { ++ hlist_del(&elem->node); + lock_sock(elem->sk); + rcu_read_lock(); + sock_map_unref(elem->sk, elem); + rcu_read_unlock(); + release_sock(elem->sk); ++ sock_put(elem->sk); ++ sock_hash_free_elem(htab, elem); + } + } + +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 69b025408390..ad9f38202731 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -96,6 +96,9 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, + } while (i != msg_rx->sg.end); + + if (unlikely(peek)) { ++ if (msg_rx == list_last_entry(&psock->ingress_msg, ++ struct sk_msg, list)) ++ break; + msg_rx = list_next_entry(msg_rx, list); + continue; + } +diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c +index 8b179e3c802a..543afd9bd664 100644 +--- a/net/rxrpc/proc.c ++++ b/net/rxrpc/proc.c +@@ -68,7 +68,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) + "Proto Local " + " Remote " + " SvID ConnID CallID End Use State Abort " +- " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n"); ++ " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n"); + return 0; + } + +@@ -100,7 +100,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) + rx_hard_ack = READ_ONCE(call->rx_hard_ack); + seq_printf(seq, + "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" +- " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n", ++ " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n", + lbuff, + rbuff, + call->service_id, +@@ -110,7 +110,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) + atomic_read(&call->usage), + rxrpc_call_states[call->state], + call->abort_code, +- call->user_call_ID, ++ call->debug_id, + tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, + rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack, + call->rx_serial, +diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c +index d024af4be85e..105d17af4abc 100644 +--- a/net/sunrpc/addr.c ++++ b/net/sunrpc/addr.c +@@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, + + rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", + IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); +- if (unlikely((size_t)rc > sizeof(scopebuf))) ++ if (unlikely((size_t)rc >= sizeof(scopebuf))) + return 0; + + len += rc; +- if (unlikely(len > buflen)) ++ if (unlikely(len >= buflen)) + return 0; + + strcat(buf, scopebuf); +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c +index 7181a30666b4..f9eb5efb237c 100644 +--- a/net/xdp/xsk.c ++++ b/net/xdp/xsk.c +@@ -362,10 +362,8 @@ static int xsk_generic_xmit(struct sock *sk) + + len = desc.len; + skb = sock_alloc_send_skb(sk, len, 1, &err); +- if (unlikely(!skb)) { +- err = -EAGAIN; ++ if (unlikely(!skb)) + goto out; +- } + + skb_put(skb, len); + addr = desc.addr; +diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh +index a07668a5c36b..94a833597a88 100755 +--- a/scripts/headers_install.sh ++++ b/scripts/headers_install.sh +@@ -64,7 +64,7 @@ configs=$(sed -e ' + d + ' $OUTFILE) + +-# The entries in the following list are not warned. ++# The entries in the following list do not result in an error. + # Please do not add a new entry. This list is only for existing ones. + # The list will be reduced gradually, and deleted eventually. 
(hopefully) + # +@@ -98,18 +98,19 @@ include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS + + for c in $configs + do +- warn=1 ++ leak_error=1 + + for ignore in $config_leak_ignores + do + if echo "$INFILE:$c" | grep -q "$ignore$"; then +- warn= ++ leak_error= + break + fi + done + +- if [ "$warn" = 1 ]; then +- echo "warning: $INFILE: leak $c to user-space" >&2 ++ if [ "$leak_error" = 1 ]; then ++ echo "error: $INFILE: leak $c to user-space" >&2 ++ exit 1 + fi + done + +diff --git a/scripts/mksysmap b/scripts/mksysmap +index a35acc0d0b82..9aa23d15862a 100755 +--- a/scripts/mksysmap ++++ b/scripts/mksysmap +@@ -41,4 +41,4 @@ + # so we just ignore them to let readprofile continue to work. + # (At least sparc64 has __crc_ in the middle). + +-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 ++$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index 5dedc0173b02..1a33f490e667 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -935,7 +935,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + * aways results in a further reduction of permissions. + */ + if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && +- !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) { ++ !unconfined(label) && ++ !aa_label_is_unconfined_subset(new, ctx->nnp)) { + error = -EPERM; + info = "no new privs"; + goto audit; +@@ -1213,7 +1214,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) + * reduce restrictions. + */ + if (task_no_new_privs(current) && !unconfined(label) && +- !aa_label_is_subset(new, ctx->nnp)) { ++ !aa_label_is_unconfined_subset(new, ctx->nnp)) { + /* not an apparmor denial per se, so don't log it */ + AA_DEBUG("no_new_privs - change_hat denied"); + error = -EPERM; +@@ -1234,7 +1235,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) + * reduce restrictions. + */ + if (task_no_new_privs(current) && !unconfined(label) && +- !aa_label_is_subset(previous, ctx->nnp)) { ++ !aa_label_is_unconfined_subset(previous, ctx->nnp)) { + /* not an apparmor denial per se, so don't log it */ + AA_DEBUG("no_new_privs - change_hat denied"); + error = -EPERM; +@@ -1429,7 +1430,7 @@ check: + * reduce restrictions. 
+ */ + if (task_no_new_privs(current) && !unconfined(label) && +- !aa_label_is_subset(new, ctx->nnp)) { ++ !aa_label_is_unconfined_subset(new, ctx->nnp)) { + /* not an apparmor denial per se, so don't log it */ + AA_DEBUG("no_new_privs - change_hat denied"); + error = -EPERM; +diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h +index 47942c4ba7ca..255764ab06e2 100644 +--- a/security/apparmor/include/label.h ++++ b/security/apparmor/include/label.h +@@ -281,6 +281,7 @@ bool aa_label_init(struct aa_label *label, int size, gfp_t gfp); + struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp); + + bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub); ++bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub); + struct aa_profile *__aa_label_next_not_in_set(struct label_it *I, + struct aa_label *set, + struct aa_label *sub); +diff --git a/security/apparmor/label.c b/security/apparmor/label.c +index 470693239e64..5f324d63ceaa 100644 +--- a/security/apparmor/label.c ++++ b/security/apparmor/label.c +@@ -550,6 +550,39 @@ bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub) + return __aa_label_next_not_in_set(&i, set, sub) == NULL; + } + ++/** ++ * aa_label_is_unconfined_subset - test if @sub is a subset of @set ++ * @set: label to test against ++ * @sub: label to test if is subset of @set ++ * ++ * This checks for subset but taking into account unconfined. IF ++ * @sub contains an unconfined profile that does not have a matching ++ * unconfined in @set then this will not cause the test to fail. ++ * Conversely we don't care about an unconfined in @set that is not in ++ * @sub ++ * ++ * Returns: true if @sub is special_subset of @set ++ * else false ++ */ ++bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub) ++{ ++ struct label_it i = { }; ++ struct aa_profile *p; ++ ++ AA_BUG(!set); ++ AA_BUG(!sub); ++ ++ if (sub == set) ++ return true; ++ ++ do { ++ p = __aa_label_next_not_in_set(&i, set, sub); ++ if (p && !profile_unconfined(p)) ++ break; ++ } while (p); ++ ++ return p == NULL; ++} + + + /** +@@ -1531,13 +1564,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label, + + label_for_each(i, label, profile) { + if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { +- if (profile->mode == APPARMOR_UNCONFINED) ++ count++; ++ if (profile == profile->ns->unconfined) + /* special case unconfined so stacks with + * unconfined don't report as mixed. ie. 
+ * profile_foo//&:ns1:unconfined (mixed) + */ + continue; +- count++; + if (mode == -1) + mode = profile->mode; + else if (mode != profile->mode) +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index ec3a928af829..e31965dc6dd1 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -791,7 +791,12 @@ static void apparmor_sk_clone_security(const struct sock *sk, + struct aa_sk_ctx *ctx = SK_CTX(sk); + struct aa_sk_ctx *new = SK_CTX(newsk); + ++ if (new->label) ++ aa_put_label(new->label); + new->label = aa_get_label(ctx->label); ++ ++ if (new->peer) ++ aa_put_label(new->peer); + new->peer = aa_get_label(ctx->peer); + } + +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c +index a5813c7629c1..f62adf3cfce8 100644 +--- a/security/selinux/ss/services.c ++++ b/security/selinux/ss/services.c +@@ -2844,8 +2844,12 @@ err: + if (*names) { + for (i = 0; i < *len; i++) + kfree((*names)[i]); ++ kfree(*names); + } + kfree(*values); ++ *len = 0; ++ *names = NULL; ++ *values = NULL; + goto out; + } + +diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c +index 67d735e9a6a4..fea92e148790 100644 +--- a/sound/firewire/amdtp-am824.c ++++ b/sound/firewire/amdtp-am824.c +@@ -82,7 +82,8 @@ int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate, + if (err < 0) + return err; + +- s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; ++ if (s->direction == AMDTP_OUT_STREAM) ++ s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; + + p->pcm_channels = pcm_channels; + p->midi_ports = midi_ports; +diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c +index c5b1d5900eed..d6420d224d09 100644 +--- a/sound/isa/wavefront/wavefront_synth.c ++++ b/sound/isa/wavefront/wavefront_synth.c +@@ -1171,7 +1171,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) + "alias for %d\n", + header->number, + header->hdr.a.OriginalSample); +- ++ ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + munge_int32 (header->number, &alias_hdr[0], 2); + munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); + munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), +@@ -1202,6 +1205,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) + int num_samples; + unsigned char *msample_hdr; + ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); + if (! 
msample_hdr) + return -ENOMEM; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index df5afac0b600..459a7d61326e 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -81,6 +81,7 @@ struct alc_spec { + + /* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */ + int mute_led_polarity; ++ int micmute_led_polarity; + hda_nid_t mute_led_nid; + hda_nid_t cap_mute_led_nid; + +@@ -4080,11 +4081,9 @@ static void alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec, + + /* update LED status via GPIO */ + static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask, +- bool enabled) ++ int polarity, bool enabled) + { +- struct alc_spec *spec = codec->spec; +- +- if (spec->mute_led_polarity) ++ if (polarity) + enabled = !enabled; + alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */ + } +@@ -4095,7 +4094,8 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled) + struct hda_codec *codec = private_data; + struct alc_spec *spec = codec->spec; + +- alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); ++ alc_update_gpio_led(codec, spec->gpio_mute_led_mask, ++ spec->mute_led_polarity, enabled); + } + + /* turn on/off mic-mute LED via GPIO per capture hook */ +@@ -4104,6 +4104,7 @@ static void alc_gpio_micmute_update(struct hda_codec *codec) + struct alc_spec *spec = codec->spec; + + alc_update_gpio_led(codec, spec->gpio_mic_led_mask, ++ spec->micmute_led_polarity, + spec->gen.micmute_led.led_value); + } + +@@ -5808,7 +5809,8 @@ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, + + snd_hda_gen_hp_automute(codec, jack); + /* mute_led_polarity is set to 0, so we pass inverted value here */ +- alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present); ++ alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity, ++ !spec->gen.hp_jack_present); + } + + /* Manage GPIOs for HP EliteBook Folio 9480m. 
+diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c +index cae1def8902d..96718e3a1ad0 100644 +--- a/sound/soc/codecs/max98373.c ++++ b/sound/soc/codecs/max98373.c +@@ -850,8 +850,8 @@ static int max98373_resume(struct device *dev) + { + struct max98373_priv *max98373 = dev_get_drvdata(dev); + +- max98373_reset(max98373, dev); + regcache_cache_only(max98373->regmap, false); ++ max98373_reset(max98373, dev); + regcache_sync(max98373->regmap); + return 0; + } +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c +index 19662ee330d6..c83f7f5da96b 100644 +--- a/sound/soc/codecs/rt5645.c ++++ b/sound/soc/codecs/rt5645.c +@@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = { + .inv_jd1_1 = true, + }; + ++static const struct rt5645_platform_data asus_t101ha_platform_data = { ++ .dmic1_data_pin = RT5645_DMIC_DATA_IN2N, ++ .dmic2_data_pin = RT5645_DMIC2_DISABLE, ++ .jd_mode = 3, ++}; ++ + static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = { + .jd_mode = 3, + .in2_diff = true, +@@ -3702,6 +3708,14 @@ static const struct dmi_system_id dmi_platform_data[] = { + }, + .driver_data = (void *)&asus_t100ha_platform_data, + }, ++ { ++ .ident = "ASUS T101HA", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"), ++ }, ++ .driver_data = (void *)&asus_t101ha_platform_data, ++ }, + { + .ident = "MINIX Z83-4", + .matches = { +diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c +index 01052a0808b0..5aee6b8366d2 100644 +--- a/sound/soc/fsl/fsl_asrc_dma.c ++++ b/sound/soc/fsl/fsl_asrc_dma.c +@@ -241,6 +241,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, + ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); + if (ret) { + dev_err(dev, "failed to config DMA channel for Back-End\n"); ++ dma_release_channel(pair->dma_chan[dir]); + return ret; + } + +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c +index c7a49d03463a..84290be778f0 100644 +--- a/sound/soc/fsl/fsl_esai.c ++++ b/sound/soc/fsl/fsl_esai.c +@@ -87,6 +87,10 @@ static irqreturn_t esai_isr(int irq, void *devid) + if ((saisr & (ESAI_SAISR_TUE | ESAI_SAISR_ROE)) && + esai_priv->reset_at_xrun) { + dev_dbg(&pdev->dev, "reset module for xrun\n"); ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, ++ ESAI_xCR_xEIE_MASK, 0); ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, ++ ESAI_xCR_xEIE_MASK, 0); + tasklet_schedule(&esai_priv->task); + } + +diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c +index fdd2c73fd2fa..869fe0068cbd 100644 +--- a/sound/soc/img/img-i2s-in.c ++++ b/sound/soc/img/img-i2s-in.c +@@ -482,6 +482,7 @@ static int img_i2s_in_probe(struct platform_device *pdev) + if (IS_ERR(rst)) { + if (PTR_ERR(rst) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; ++ pm_runtime_put(&pdev->dev); + goto err_suspend; + } + +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c +index e62e1d7815aa..f7964d1ec486 100644 +--- a/sound/soc/intel/boards/bytcr_rt5640.c ++++ b/sound/soc/intel/boards/bytcr_rt5640.c +@@ -742,6 +742,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, ++ { /* Toshiba Encore WT8-A */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT8-A"), ++ }, ++ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | ++ BYT_RT5640_JD_SRC_JD2_IN4N | ++ 
BYT_RT5640_OVCD_TH_2000UA | ++ BYT_RT5640_OVCD_SF_0P75 | ++ BYT_RT5640_JD_NOT_INV | ++ BYT_RT5640_MCLK_EN), ++ }, ++ { /* Toshiba Encore WT10-A */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"), ++ }, ++ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | ++ BYT_RT5640_JD_SRC_JD1_IN4P | ++ BYT_RT5640_OVCD_TH_2000UA | ++ BYT_RT5640_OVCD_SF_0P75 | ++ BYT_RT5640_SSP0_AIF2 | ++ BYT_RT5640_MCLK_EN), ++ }, + { /* Catch-all for generic Insyde tablets, must be last */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), +diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c +index d286dff3171d..898ef1d5608f 100644 +--- a/sound/soc/meson/axg-fifo.c ++++ b/sound/soc/meson/axg-fifo.c +@@ -244,7 +244,7 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) + /* Enable pclk to access registers and clock the fifo ip */ + ret = clk_prepare_enable(fifo->pclk); + if (ret) +- return ret; ++ goto free_irq; + + /* Setup status2 so it reports the memory pointer */ + regmap_update_bits(fifo->map, FIFO_CTRL1, +@@ -264,8 +264,14 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) + /* Take memory arbitror out of reset */ + ret = reset_control_deassert(fifo->arb); + if (ret) +- clk_disable_unprepare(fifo->pclk); ++ goto free_clk; ++ ++ return 0; + ++free_clk: ++ clk_disable_unprepare(fifo->pclk); ++free_irq: ++ free_irq(fifo->irq, ss); + return ret; + } + +diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c +index 548eb4fa2da6..9f0ffdcef637 100644 +--- a/sound/soc/qcom/qdsp6/q6asm-dai.c ++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c +@@ -171,7 +171,7 @@ static const struct snd_compr_codec_caps q6asm_compr_caps = { + }; + + static void event_handler(uint32_t opcode, uint32_t token, +- uint32_t *payload, void *priv) ++ void *payload, void *priv) + { + struct q6asm_dai_rtd *prtd = priv; + struct snd_pcm_substream *substream = prtd->substream; +@@ -494,7 +494,7 @@ static struct snd_pcm_ops q6asm_dai_ops = { + }; + + static void compress_event_handler(uint32_t opcode, uint32_t token, +- uint32_t *payload, void *priv) ++ void *payload, void *priv) + { + struct q6asm_dai_rtd *prtd = priv; + struct snd_compr_stream *substream = prtd->cstream; +diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c +index af19010b9d88..8bd49c8a9517 100644 +--- a/sound/soc/sh/rcar/gen.c ++++ b/sound/soc/sh/rcar/gen.c +@@ -224,6 +224,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv) + RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884), + RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888), + RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE0, 0x850), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE1, 0x854), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE2, 0x858), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE3, 0x85c), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE4, 0x890), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE5, 0x894), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE6, 0x898), ++ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE7, 0x89c), + RSND_GEN_S_REG(HDMI0_SEL, 0x9e0), + RSND_GEN_S_REG(HDMI1_SEL, 0x9e4), + +diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h +index ea6cbaa9743e..d47608ff5fac 100644 +--- a/sound/soc/sh/rcar/rsnd.h ++++ b/sound/soc/sh/rcar/rsnd.h +@@ -189,6 +189,14 @@ enum rsnd_reg { + SSI_SYS_STATUS5, + SSI_SYS_STATUS6, + SSI_SYS_STATUS7, ++ SSI_SYS_INT_ENABLE0, ++ SSI_SYS_INT_ENABLE1, ++ SSI_SYS_INT_ENABLE2, ++ SSI_SYS_INT_ENABLE3, ++ SSI_SYS_INT_ENABLE4, ++ SSI_SYS_INT_ENABLE5, ++ SSI_SYS_INT_ENABLE6, ++ SSI_SYS_INT_ENABLE7, + 
HDMI0_SEL, + HDMI1_SEL, + SSI9_BUSIF0_MODE, +@@ -237,6 +245,7 @@ enum rsnd_reg { + #define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i)) + #define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i)) + #define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i)) ++#define SSI_SYS_INT_ENABLE(i) (SSI_SYS_INT_ENABLE0 + (i)) + + + struct rsnd_priv; +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index 4a7d3413917f..47d5ddb526f2 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -372,6 +372,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, + u32 wsr = ssi->wsr; + int width; + int is_tdm, is_tdm_split; ++ int id = rsnd_mod_id(mod); ++ int i; ++ u32 sys_int_enable = 0; + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); +@@ -447,6 +450,38 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, + cr_mode = DIEN; /* PIO : enable Data interrupt */ + } + ++ /* enable busif buffer over/under run interrupt. */ ++ if (is_tdm || is_tdm_split) { ++ switch (id) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ for (i = 0; i < 4; i++) { ++ sys_int_enable = rsnd_mod_read(mod, ++ SSI_SYS_INT_ENABLE(i * 2)); ++ sys_int_enable |= 0xf << (id * 4); ++ rsnd_mod_write(mod, ++ SSI_SYS_INT_ENABLE(i * 2), ++ sys_int_enable); ++ } ++ ++ break; ++ case 9: ++ for (i = 0; i < 4; i++) { ++ sys_int_enable = rsnd_mod_read(mod, ++ SSI_SYS_INT_ENABLE((i * 2) + 1)); ++ sys_int_enable |= 0xf << 4; ++ rsnd_mod_write(mod, ++ SSI_SYS_INT_ENABLE((i * 2) + 1), ++ sys_int_enable); ++ } ++ ++ break; ++ } ++ } ++ + init_end: + ssi->cr_own = cr_own; + ssi->cr_mode = cr_mode; +@@ -496,6 +531,13 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, + { + struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); + struct device *dev = rsnd_priv_to_dev(priv); ++ int is_tdm, is_tdm_split; ++ int id = rsnd_mod_id(mod); ++ int i; ++ u32 sys_int_enable = 0; ++ ++ is_tdm = rsnd_runtime_is_tdm(io); ++ is_tdm_split = rsnd_runtime_is_tdm_split(io); + + if (!rsnd_ssi_is_run_mods(mod, io)) + return 0; +@@ -517,6 +559,38 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, + ssi->wsr = 0; + } + ++ /* disable busif buffer over/under run interrupt. */ ++ if (is_tdm || is_tdm_split) { ++ switch (id) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ for (i = 0; i < 4; i++) { ++ sys_int_enable = rsnd_mod_read(mod, ++ SSI_SYS_INT_ENABLE(i * 2)); ++ sys_int_enable &= ~(0xf << (id * 4)); ++ rsnd_mod_write(mod, ++ SSI_SYS_INT_ENABLE(i * 2), ++ sys_int_enable); ++ } ++ ++ break; ++ case 9: ++ for (i = 0; i < 4; i++) { ++ sys_int_enable = rsnd_mod_read(mod, ++ SSI_SYS_INT_ENABLE((i * 2) + 1)); ++ sys_int_enable &= ~(0xf << 4); ++ rsnd_mod_write(mod, ++ SSI_SYS_INT_ENABLE((i * 2) + 1), ++ sys_int_enable); ++ } ++ ++ break; ++ } ++ } ++ + return 0; + } + +@@ -622,6 +696,11 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, + int enable) + { + u32 val = 0; ++ int is_tdm, is_tdm_split; ++ int id = rsnd_mod_id(mod); ++ ++ is_tdm = rsnd_runtime_is_tdm(io); ++ is_tdm_split = rsnd_runtime_is_tdm_split(io); + + if (rsnd_is_gen1(priv)) + return 0; +@@ -635,6 +714,19 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, + if (enable) + val = rsnd_ssi_is_dma_mode(mod) ? 
0x0e000000 : 0x0f000000; + ++ if (is_tdm || is_tdm_split) { ++ switch (id) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ case 9: ++ val |= 0x0000ff00; ++ break; ++ } ++ } ++ + rsnd_mod_write(mod, SSI_INT_ENABLE, val); + + return 0; +@@ -651,6 +743,12 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, + u32 status; + bool elapsed = false; + bool stop = false; ++ int id = rsnd_mod_id(mod); ++ int i; ++ int is_tdm, is_tdm_split; ++ ++ is_tdm = rsnd_runtime_is_tdm(io); ++ is_tdm_split = rsnd_runtime_is_tdm_split(io); + + spin_lock(&priv->lock); + +@@ -672,6 +770,53 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, + stop = true; + } + ++ status = 0; ++ ++ if (is_tdm || is_tdm_split) { ++ switch (id) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ for (i = 0; i < 4; i++) { ++ status = rsnd_mod_read(mod, ++ SSI_SYS_STATUS(i * 2)); ++ status &= 0xf << (id * 4); ++ ++ if (status) { ++ rsnd_dbg_irq_status(dev, ++ "%s err status : 0x%08x\n", ++ rsnd_mod_name(mod), status); ++ rsnd_mod_write(mod, ++ SSI_SYS_STATUS(i * 2), ++ 0xf << (id * 4)); ++ stop = true; ++ break; ++ } ++ } ++ break; ++ case 9: ++ for (i = 0; i < 4; i++) { ++ status = rsnd_mod_read(mod, ++ SSI_SYS_STATUS((i * 2) + 1)); ++ status &= 0xf << 4; ++ ++ if (status) { ++ rsnd_dbg_irq_status(dev, ++ "%s err status : 0x%08x\n", ++ rsnd_mod_name(mod), status); ++ rsnd_mod_write(mod, ++ SSI_SYS_STATUS((i * 2) + 1), ++ 0xf << 4); ++ stop = true; ++ break; ++ } ++ } ++ break; ++ } ++ } ++ + rsnd_ssi_status_clear(mod); + rsnd_ssi_interrupt_out: + spin_unlock(&priv->lock); +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index 0215e2c94bf0..9df20768a8f2 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -1895,9 +1895,25 @@ match: + dai_link->platforms->name = component->name; + + /* convert non BE into BE */ +- dai_link->no_pcm = 1; +- dai_link->dpcm_playback = 1; +- dai_link->dpcm_capture = 1; ++ if (!dai_link->no_pcm) { ++ dai_link->no_pcm = 1; ++ ++ if (dai_link->dpcm_playback) ++ dev_warn(card->dev, ++ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n", ++ dai_link->name); ++ if (dai_link->dpcm_capture) ++ dev_warn(card->dev, ++ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n", ++ dai_link->name); ++ ++ /* convert normal link into DPCM one */ ++ if (!(dai_link->dpcm_playback || ++ dai_link->dpcm_capture)) { ++ dai_link->dpcm_playback = !dai_link->capture_only; ++ dai_link->dpcm_capture = !dai_link->playback_only; ++ } ++ } + + /* override any BE fixups */ + dai_link->be_hw_params_fixup = +diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c +index 12aec140819a..2a6b84d2781e 100644 +--- a/sound/soc/sof/core.c ++++ b/sound/soc/sof/core.c +@@ -372,6 +372,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) + /* init the IPC */ + sdev->ipc = snd_sof_ipc_init(sdev); + if (!sdev->ipc) { ++ ret = -ENOMEM; + dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret); + goto ipc_err; + } +diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig +index 71f318bc2c74..b4f0426685c4 100644 +--- a/sound/soc/sof/imx/Kconfig ++++ b/sound/soc/sof/imx/Kconfig +@@ -14,7 +14,7 @@ if SND_SOC_SOF_IMX_TOPLEVEL + config SND_SOC_SOF_IMX8_SUPPORT + bool "SOF support for i.MX8" + depends on IMX_SCU +- depends on IMX_DSP ++ select IMX_DSP + help + This adds support for Sound Open Firmware for NXP i.MX8 platforms + Say Y if you have such a device. 
+diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c +index 3d128e5a132c..ea0fe9a09f3f 100644 +--- a/sound/soc/sof/nocodec.c ++++ b/sound/soc/sof/nocodec.c +@@ -52,8 +52,10 @@ static int sof_nocodec_bes_setup(struct device *dev, + links[i].platforms->name = dev_name(dev); + links[i].codecs->dai_name = "snd-soc-dummy-dai"; + links[i].codecs->name = "snd-soc-dummy"; +- links[i].dpcm_playback = 1; +- links[i].dpcm_capture = 1; ++ if (ops->drv[i].playback.channels_min) ++ links[i].dpcm_playback = 1; ++ if (ops->drv[i].capture.channels_min) ++ links[i].dpcm_capture = 1; + } + + card->dai_link = links; +diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c +index 195af259e78e..128680b09c20 100644 +--- a/sound/soc/sof/pm.c ++++ b/sound/soc/sof/pm.c +@@ -266,7 +266,10 @@ static int sof_resume(struct device *dev, bool runtime_resume) + int ret; + + /* do nothing if dsp resume callbacks are not set */ +- if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume) ++ if (!runtime_resume && !sof_ops(sdev)->resume) ++ return 0; ++ ++ if (runtime_resume && !sof_ops(sdev)->runtime_resume) + return 0; + + /* DSP was never successfully started, nothing to resume */ +@@ -346,7 +349,10 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) + int ret; + + /* do nothing if dsp suspend callback is not set */ +- if (!sof_ops(sdev)->suspend) ++ if (!runtime_suspend && !sof_ops(sdev)->suspend) ++ return 0; ++ ++ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend) + return 0; + + if (sdev->fw_state != SOF_FW_BOOT_COMPLETE) +diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c +index 6211dfda2195..0fa01cacfec9 100644 +--- a/sound/soc/tegra/tegra_wm8903.c ++++ b/sound/soc/tegra/tegra_wm8903.c +@@ -159,6 +159,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) + struct snd_soc_component *component = codec_dai->component; + struct snd_soc_card *card = rtd->card; + struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); ++ int shrt = 0; + + if (gpio_is_valid(machine->gpio_hp_det)) { + tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det; +@@ -171,12 +172,15 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) + &tegra_wm8903_hp_jack_gpio); + } + ++ if (of_property_read_bool(card->dev->of_node, "nvidia,headset")) ++ shrt = SND_JACK_MICROPHONE; ++ + snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE, + &tegra_wm8903_mic_jack, + tegra_wm8903_mic_jack_pins, + ARRAY_SIZE(tegra_wm8903_mic_jack_pins)); + wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE, +- 0); ++ shrt); + + snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS"); + +diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c +index 7aa3c32e4a49..0541071f454b 100644 +--- a/sound/soc/ti/davinci-mcasp.c ++++ b/sound/soc/ti/davinci-mcasp.c +@@ -1875,8 +1875,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) + PTR_ERR(chan)); + return PTR_ERR(chan); + } +- if (WARN_ON(!chan->device || !chan->device->dev)) ++ if (WARN_ON(!chan->device || !chan->device->dev)) { ++ dma_release_channel(chan); + return -EINVAL; ++ } + + if (chan->device->dev->of_node) + ret = of_property_read_string(chan->device->dev->of_node, +diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c +index 26b503bbdb5f..3273b317fa3b 100644 +--- a/sound/soc/ti/omap-mcbsp.c ++++ b/sound/soc/ti/omap-mcbsp.c +@@ -686,7 +686,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) + mcbsp->dma_data[1].addr = 
omap_mcbsp_dma_reg_params(mcbsp, + SNDRV_PCM_STREAM_CAPTURE); + +- mcbsp->fclk = clk_get(&pdev->dev, "fck"); ++ mcbsp->fclk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(mcbsp->fclk)) { + ret = PTR_ERR(mcbsp->fclk); + dev_err(mcbsp->dev, "unable to get fck: %d\n", ret); +@@ -711,7 +711,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) + if (ret) { + dev_err(mcbsp->dev, + "Unable to create additional controls\n"); +- goto err_thres; ++ return ret; + } + } + +@@ -724,8 +724,6 @@ static int omap_mcbsp_init(struct platform_device *pdev) + err_st: + if (mcbsp->pdata->buffer_size) + sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group); +-err_thres: +- clk_put(mcbsp->fclk); + return ret; + } + +@@ -1442,8 +1440,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev) + + omap_mcbsp_st_cleanup(pdev); + +- clk_put(mcbsp->fclk); +- + return 0; + } + +diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c +index 2873e8e6f02b..cdae1190b930 100644 +--- a/sound/soc/ux500/mop500.c ++++ b/sound/soc/ux500/mop500.c +@@ -63,10 +63,11 @@ static void mop500_of_node_put(void) + { + int i; + +- for (i = 0; i < 2; i++) { ++ for (i = 0; i < 2; i++) + of_node_put(mop500_dai_links[i].cpus->of_node); +- of_node_put(mop500_dai_links[i].codecs->of_node); +- } ++ ++ /* Both links use the same codec, which is refcounted only once */ ++ of_node_put(mop500_dai_links[0].codecs->of_node); + } + + static int mop500_of_probe(struct platform_device *pdev, +@@ -81,7 +82,9 @@ static int mop500_of_probe(struct platform_device *pdev, + + if (!(msp_np[0] && msp_np[1] && codec_np)) { + dev_err(&pdev->dev, "Phandle missing or invalid\n"); +- mop500_of_node_put(); ++ for (i = 0; i < 2; i++) ++ of_node_put(msp_np[i]); ++ of_node_put(codec_np); + return -EINVAL; + } + +diff --git a/sound/usb/card.h b/sound/usb/card.h +index 395403a2d33f..d6219fba9699 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -84,6 +84,10 @@ struct snd_usb_endpoint { + dma_addr_t sync_dma; /* DMA address of syncbuf */ + + unsigned int pipe; /* the data i/o pipe */ ++ unsigned int framesize[2]; /* small/large frame sizes in samples */ ++ unsigned int sample_rem; /* remainder from division fs/fps */ ++ unsigned int sample_accum; /* sample accumulator */ ++ unsigned int fps; /* frames per second */ + unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ + unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ + int freqshift; /* how much to shift the feedback value to get Q16.16 */ +@@ -104,6 +108,7 @@ struct snd_usb_endpoint { + int iface, altsetting; + int skip_packets; /* quirks for devices to ignore the first n packets + in a stream */ ++ bool is_implicit_feedback; /* This endpoint is used as implicit feedback */ + + spinlock_t lock; + struct list_head list; +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index 4a9a2f6ef5a4..9bea7d3f99f8 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -124,12 +124,12 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep) + + /* + * For streaming based on information derived from sync endpoints, +- * prepare_outbound_urb_sizes() will call next_packet_size() to ++ * prepare_outbound_urb_sizes() will call slave_next_packet_size() to + * determine the number of samples to be sent in the next packet. + * +- * For implicit feedback, next_packet_size() is unused. ++ * For implicit feedback, slave_next_packet_size() is unused. 
+ */ +-int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) ++int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep) + { + unsigned long flags; + int ret; +@@ -146,6 +146,29 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) + return ret; + } + ++/* ++ * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes() ++ * will call next_packet_size() to determine the number of samples to be ++ * sent in the next packet. ++ */ ++int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) ++{ ++ int ret; ++ ++ if (ep->fill_max) ++ return ep->maxframesize; ++ ++ ep->sample_accum += ep->sample_rem; ++ if (ep->sample_accum >= ep->fps) { ++ ep->sample_accum -= ep->fps; ++ ret = ep->framesize[1]; ++ } else { ++ ret = ep->framesize[0]; ++ } ++ ++ return ret; ++} ++ + static void retire_outbound_urb(struct snd_usb_endpoint *ep, + struct snd_urb_ctx *urb_ctx) + { +@@ -190,6 +213,8 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep, + + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; ++ else if (ep->sync_master) ++ counts = snd_usb_endpoint_slave_next_packet_size(ep); + else + counts = snd_usb_endpoint_next_packet_size(ep); + +@@ -321,17 +346,17 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep) + ep->next_packet_read_pos %= MAX_URBS; + + /* take URB out of FIFO */ +- if (!list_empty(&ep->ready_playback_urbs)) ++ if (!list_empty(&ep->ready_playback_urbs)) { + ctx = list_first_entry(&ep->ready_playback_urbs, + struct snd_urb_ctx, ready_list); ++ list_del_init(&ctx->ready_list); ++ } + } + spin_unlock_irqrestore(&ep->lock, flags); + + if (ctx == NULL) + return; + +- list_del_init(&ctx->ready_list); +- + /* copy over the length information */ + for (i = 0; i < packet->packets; i++) + ctx->packet_size[i] = packet->packet_size[i]; +@@ -497,6 +522,8 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip, + + list_add_tail(&ep->list, &chip->ep_list); + ++ ep->is_implicit_feedback = 0; ++ + __exit_unlock: + mutex_unlock(&chip->mutex); + +@@ -596,6 +623,178 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force) + ep->nurbs = 0; + } + ++/* ++ * Check data endpoint for format differences ++ */ ++static bool check_ep_params(struct snd_usb_endpoint *ep, ++ snd_pcm_format_t pcm_format, ++ unsigned int channels, ++ unsigned int period_bytes, ++ unsigned int frames_per_period, ++ unsigned int periods_per_buffer, ++ struct audioformat *fmt, ++ struct snd_usb_endpoint *sync_ep) ++{ ++ unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb; ++ unsigned int max_packs_per_period, urbs_per_period, urb_packs; ++ unsigned int max_urbs; ++ int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels; ++ int tx_length_quirk = (ep->chip->tx_length_quirk && ++ usb_pipeout(ep->pipe)); ++ bool ret = 1; ++ ++ if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) { ++ /* ++ * When operating in DSD DOP mode, the size of a sample frame ++ * in hardware differs from the actual physical format width ++ * because we need to make room for the DOP markers. 
++ */ ++ frame_bits += channels << 3; ++ } ++ ++ ret = ret && (ep->datainterval == fmt->datainterval); ++ ret = ret && (ep->stride == frame_bits >> 3); ++ ++ switch (pcm_format) { ++ case SNDRV_PCM_FORMAT_U8: ++ ret = ret && (ep->silence_value == 0x80); ++ break; ++ case SNDRV_PCM_FORMAT_DSD_U8: ++ case SNDRV_PCM_FORMAT_DSD_U16_LE: ++ case SNDRV_PCM_FORMAT_DSD_U32_LE: ++ case SNDRV_PCM_FORMAT_DSD_U16_BE: ++ case SNDRV_PCM_FORMAT_DSD_U32_BE: ++ ret = ret && (ep->silence_value == 0x69); ++ break; ++ default: ++ ret = ret && (ep->silence_value == 0); ++ } ++ ++ /* assume max. frequency is 50% higher than nominal */ ++ ret = ret && (ep->freqmax == ep->freqn + (ep->freqn >> 1)); ++ /* Round up freqmax to nearest integer in order to calculate maximum ++ * packet size, which must represent a whole number of frames. ++ * This is accomplished by adding 0x0.ffff before converting the ++ * Q16.16 format into integer. ++ * In order to accurately calculate the maximum packet size when ++ * the data interval is more than 1 (i.e. ep->datainterval > 0), ++ * multiply by the data interval prior to rounding. For instance, ++ * a freqmax of 41 kHz will result in a max packet size of 6 (5.125) ++ * frames with a data interval of 1, but 11 (10.25) frames with a ++ * data interval of 2. ++ * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the ++ * maximum datainterval value of 3, at USB full speed, higher for ++ * USB high speed, noting that ep->freqmax is in units of ++ * frames per packet in Q16.16 format.) ++ */ ++ maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) * ++ (frame_bits >> 3); ++ if (tx_length_quirk) ++ maxsize += sizeof(__le32); /* Space for length descriptor */ ++ /* but wMaxPacketSize might reduce this */ ++ if (ep->maxpacksize && ep->maxpacksize < maxsize) { ++ /* whatever fits into a max. size packet */ ++ unsigned int data_maxsize = maxsize = ep->maxpacksize; ++ ++ if (tx_length_quirk) ++ /* Need to remove the length descriptor to calc freq */ ++ data_maxsize -= sizeof(__le32); ++ ret = ret && (ep->freqmax == (data_maxsize / (frame_bits >> 3)) ++ << (16 - ep->datainterval)); ++ } ++ ++ if (ep->fill_max) ++ ret = ret && (ep->curpacksize == ep->maxpacksize); ++ else ++ ret = ret && (ep->curpacksize == maxsize); ++ ++ if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) { ++ packs_per_ms = 8 >> ep->datainterval; ++ max_packs_per_urb = MAX_PACKS_HS; ++ } else { ++ packs_per_ms = 1; ++ max_packs_per_urb = MAX_PACKS; ++ } ++ if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep)) ++ max_packs_per_urb = min(max_packs_per_urb, ++ 1U << sync_ep->syncinterval); ++ max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval); ++ ++ /* ++ * Capture endpoints need to use small URBs because there's no way ++ * to tell in advance where the next period will end, and we don't ++ * want the next URB to complete much after the period ends. ++ * ++ * Playback endpoints with implicit sync much use the same parameters ++ * as their corresponding capture endpoint. ++ */ ++ if (usb_pipein(ep->pipe) || ++ snd_usb_endpoint_implicit_feedback_sink(ep)) { ++ ++ urb_packs = packs_per_ms; ++ /* ++ * Wireless devices can poll at a max rate of once per 4ms. ++ * For dataintervals less than 5, increase the packet count to ++ * allow the host controller to use bursting to fill in the ++ * gaps. 
++ */ ++ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) { ++ int interval = ep->datainterval; ++ ++ while (interval < 5) { ++ urb_packs <<= 1; ++ ++interval; ++ } ++ } ++ /* make capture URBs <= 1 ms and smaller than a period */ ++ urb_packs = min(max_packs_per_urb, urb_packs); ++ while (urb_packs > 1 && urb_packs * maxsize >= period_bytes) ++ urb_packs >>= 1; ++ ret = ret && (ep->nurbs == MAX_URBS); ++ ++ /* ++ * Playback endpoints without implicit sync are adjusted so that ++ * a period fits as evenly as possible in the smallest number of ++ * URBs. The total number of URBs is adjusted to the size of the ++ * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits. ++ */ ++ } else { ++ /* determine how small a packet can be */ ++ minsize = (ep->freqn >> (16 - ep->datainterval)) * ++ (frame_bits >> 3); ++ /* with sync from device, assume it can be 12% lower */ ++ if (sync_ep) ++ minsize -= minsize >> 3; ++ minsize = max(minsize, 1u); ++ ++ /* how many packets will contain an entire ALSA period? */ ++ max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize); ++ ++ /* how many URBs will contain a period? */ ++ urbs_per_period = DIV_ROUND_UP(max_packs_per_period, ++ max_packs_per_urb); ++ /* how many packets are needed in each URB? */ ++ urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period); ++ ++ /* limit the number of frames in a single URB */ ++ ret = ret && (ep->max_urb_frames == ++ DIV_ROUND_UP(frames_per_period, urbs_per_period)); ++ ++ /* try to use enough URBs to contain an entire ALSA buffer */ ++ max_urbs = min((unsigned) MAX_URBS, ++ MAX_QUEUE * packs_per_ms / urb_packs); ++ ret = ret && (ep->nurbs == min(max_urbs, ++ urbs_per_period * periods_per_buffer)); ++ } ++ ++ ret = ret && (ep->datainterval == fmt->datainterval); ++ ret = ret && (ep->maxpacksize == fmt->maxpacksize); ++ ret = ret && ++ (ep->fill_max == !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX)); ++ ++ return ret; ++} ++ + /* + * configure a data endpoint + */ +@@ -861,10 +1060,23 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, + int err; + + if (ep->use_count != 0) { +- usb_audio_warn(ep->chip, +- "Unable to change format on ep #%x: already in use\n", +- ep->ep_num); +- return -EBUSY; ++ bool check = ep->is_implicit_feedback && ++ check_ep_params(ep, pcm_format, ++ channels, period_bytes, ++ period_frames, buffer_periods, ++ fmt, sync_ep); ++ ++ if (!check) { ++ usb_audio_warn(ep->chip, ++ "Unable to change format on ep #%x: already in use\n", ++ ep->ep_num); ++ return -EBUSY; ++ } ++ ++ usb_audio_dbg(ep->chip, ++ "Ep #%x already in use as implicit feedback but format not changed\n", ++ ep->ep_num); ++ return 0; + } + + /* release old buffers, if any */ +@@ -874,10 +1086,17 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, + ep->maxpacksize = fmt->maxpacksize; + ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); + +- if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) ++ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) { + ep->freqn = get_usb_full_speed_rate(rate); +- else ++ ep->fps = 1000; ++ } else { + ep->freqn = get_usb_high_speed_rate(rate); ++ ep->fps = 8000; ++ } ++ ++ ep->sample_rem = rate % ep->fps; ++ ep->framesize[0] = rate / ep->fps; ++ ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps; + + /* calculate the frequency in 16.16 format */ + ep->freqm = ep->freqn; +@@ -936,6 +1155,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) + ep->active_mask = 0; + ep->unlink_mask = 0; + ep->phase = 0; ++ ep->sample_accum = 0; + + 
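
A rough standalone walk-through of the playback URB sizing that check_ep_params() re-derives above: pack one ALSA period into as few URBs as possible, then spread the packets evenly across them. The stream parameters (48 kHz, 16-bit stereo, 1024-frame periods) and the 48-packet URB limit are illustrative assumptions, not values read from a real device:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        const unsigned int frame_bytes = 4;                             /* S16_LE, stereo */
        const unsigned int minsize = (48000 / 8000) * frame_bytes;      /* nominal packet size, bytes */
        const unsigned int period_bytes = 1024 * frame_bytes;
        const unsigned int max_packs_per_urb = 48;

        unsigned int packs_per_period = DIV_ROUND_UP(period_bytes, minsize);
        unsigned int urbs_per_period = DIV_ROUND_UP(packs_per_period, max_packs_per_urb);
        unsigned int urb_packs = DIV_ROUND_UP(packs_per_period, urbs_per_period);

        printf("%u packets per period -> %u URBs per period, %u packets per URB\n",
               packs_per_period, urbs_per_period, urb_packs);
        return 0;
}
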
snd_usb_endpoint_start_quirk(ep); + +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h +index 63a39d4fa8d8..d23fa0a8c11b 100644 +--- a/sound/usb/endpoint.h ++++ b/sound/usb/endpoint.h +@@ -28,6 +28,7 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep); + void snd_usb_endpoint_free(struct snd_usb_endpoint *ep); + + int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); ++int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep); + int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); + + void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index ad8f38380aa3..6c391e5fad2a 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -386,6 +386,8 @@ add_sync_ep: + if (!subs->sync_endpoint) + return -EINVAL; + ++ subs->sync_endpoint->is_implicit_feedback = 1; ++ + subs->data_endpoint->sync_master = subs->sync_endpoint; + + return 1; +@@ -484,12 +486,15 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, + implicit_fb ? + SND_USB_ENDPOINT_TYPE_DATA : + SND_USB_ENDPOINT_TYPE_SYNC); ++ + if (!subs->sync_endpoint) { + if (is_playback && attr == USB_ENDPOINT_SYNC_NONE) + return 0; + return -EINVAL; + } + ++ subs->sync_endpoint->is_implicit_feedback = implicit_fb; ++ + subs->data_endpoint->sync_master = subs->sync_endpoint; + + return 0; +@@ -1575,6 +1580,8 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, + for (i = 0; i < ctx->packets; i++) { + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; ++ else if (ep->sync_master) ++ counts = snd_usb_endpoint_slave_next_packet_size(ep); + else + counts = snd_usb_endpoint_next_packet_size(ep); + +diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c +index 87f27e2664c5..d9e386b8f47e 100644 +--- a/tools/lib/bpf/btf_dump.c ++++ b/tools/lib/bpf/btf_dump.c +@@ -1141,6 +1141,20 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) + } + } + ++static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack) ++{ ++ const struct btf_type *t; ++ __u32 id; ++ ++ while (decl_stack->cnt) { ++ id = decl_stack->ids[decl_stack->cnt - 1]; ++ t = btf__type_by_id(d->btf, id); ++ if (!btf_is_mod(t)) ++ return; ++ decl_stack->cnt--; ++ } ++} ++ + static void btf_dump_emit_name(const struct btf_dump *d, + const char *name, bool last_was_ptr) + { +@@ -1239,14 +1253,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, + * a const/volatile modifier for array, so we are + * going to silently skip them here. + */ +- while (decls->cnt) { +- next_id = decls->ids[decls->cnt - 1]; +- next_t = btf__type_by_id(d->btf, next_id); +- if (btf_is_mod(next_t)) +- decls->cnt--; +- else +- break; +- } ++ btf_dump_drop_mods(d, decls); + + if (decls->cnt == 0) { + btf_dump_emit_name(d, fname, last_was_ptr); +@@ -1274,7 +1281,15 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, + __u16 vlen = btf_vlen(t); + int i; + +- btf_dump_emit_mods(d, decls); ++ /* ++ * GCC emits extra volatile qualifier for ++ * __attribute__((noreturn)) function pointers. Clang ++ * doesn't do it. It's a GCC quirk for backwards ++ * compatibility with code written for GCC <2.5. So, ++ * similarly to extra qualifiers for array, just drop ++ * them, instead of handling them. 
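
A standalone sketch of the modifier-dropping helper introduced above for the BTF dumper: pop declaration-stack entries for as long as they are type modifiers and stop at the first one that is not. The stack layout and kind encoding here are simplified stand-ins for the libbpf structures:

#include <stdbool.h>
#include <stdio.h>

enum kind { KIND_INT, KIND_PTR, KIND_CONST, KIND_VOLATILE, KIND_RESTRICT };

static bool is_mod(enum kind k)
{
        return k == KIND_CONST || k == KIND_VOLATILE || k == KIND_RESTRICT;
}

static void drop_mods(const enum kind *stack, int *cnt)
{
        while (*cnt > 0) {
                if (!is_mod(stack[*cnt - 1]))
                        return;
                (*cnt)--;
        }
}

int main(void)
{
        enum kind decls[] = { KIND_INT, KIND_PTR, KIND_VOLATILE, KIND_CONST };
        int cnt = 4;

        drop_mods(decls, &cnt);
        printf("entries left on the stack: %d\n", cnt);        /* 2: INT, PTR */
        return 0;
}
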
++ */ ++ btf_dump_drop_mods(d, decls); + if (decls->cnt) { + btf_dump_printf(d, " ("); + btf_dump_emit_type_chain(d, decls, fname, lvl); +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c +index 4d8db41b949a..d3c0b04e2e22 100644 +--- a/tools/perf/builtin-report.c ++++ b/tools/perf/builtin-report.c +@@ -462,8 +462,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report + if (rep->time_str) + ret += fprintf(fp, " (time slices: %s)", rep->time_str); + +- if (symbol_conf.show_ref_callgraph && +- strstr(evname, "call-graph=no")) { ++ if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { + ret += fprintf(fp, ", show reference callgraph"); + } + +diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c +index ed3b0ac2f785..373e399e57d2 100644 +--- a/tools/perf/util/stat-display.c ++++ b/tools/perf/util/stat-display.c +@@ -661,7 +661,7 @@ static void print_aggr(struct perf_stat_config *config, + int s; + bool first; + +- if (!(config->aggr_map || config->aggr_get_id)) ++ if (!config->aggr_map || !config->aggr_get_id) + return; + + aggr_update_shadow(config, evlist); +@@ -1140,7 +1140,7 @@ static void print_percore(struct perf_stat_config *config, + int s; + bool first = true; + +- if (!(config->aggr_map || config->aggr_get_id)) ++ if (!config->aggr_map || !config->aggr_get_id) + return; + + for (s = 0; s < config->aggr_map->nr; s++) { +diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c +index aca3491174a1..f4bb4fef0f39 100644 +--- a/tools/testing/selftests/networking/timestamping/timestamping.c ++++ b/tools/testing/selftests/networking/timestamping/timestamping.c +@@ -313,10 +313,16 @@ int main(int argc, char **argv) + int val; + socklen_t len; + struct timeval next; ++ size_t if_len; + + if (argc < 2) + usage(0); + interface = argv[1]; ++ if_len = strlen(interface); ++ if (if_len >= IFNAMSIZ) { ++ printf("interface name exceeds IFNAMSIZ\n"); ++ exit(1); ++ } + + for (i = 2; i < argc; i++) { + if (!strcasecmp(argv[i], "SO_TIMESTAMP")) +@@ -350,12 +356,12 @@ int main(int argc, char **argv) + bail("socket"); + + memset(&device, 0, sizeof(device)); +- strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); ++ memcpy(device.ifr_name, interface, if_len + 1); + if (ioctl(sock, SIOCGIFADDR, &device) < 0) + bail("getting interface IP address"); + + memset(&hwtstamp, 0, sizeof(hwtstamp)); +- strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); ++ memcpy(hwtstamp.ifr_name, interface, if_len + 1); + hwtstamp.ifr_data = (void *)&hwconfig; + memset(&hwconfig, 0, sizeof(hwconfig)); + hwconfig.tx_type = +diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh +index 9c60337317c6..020137b61407 100755 +--- a/tools/testing/selftests/ntb/ntb_test.sh ++++ b/tools/testing/selftests/ntb/ntb_test.sh +@@ -241,7 +241,7 @@ function get_files_count() + split_remote $LOC + + if [[ "$REMOTE" == "" ]]; then +- echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l) ++ echo $(ls -1 "$VPATH"/${NAME}* 2>/dev/null | wc -l) + else + echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \ + wc -l" 2> /dev/null) +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c +index 480995bceefa..47191af46617 100644 +--- a/tools/testing/selftests/x86/protection_keys.c ++++ b/tools/testing/selftests/x86/protection_keys.c +@@ -24,6 +24,7 @@ + #define 
_GNU_SOURCE + #include + #include ++#include + #include + #include + #include +@@ -612,10 +613,10 @@ int alloc_random_pkey(void) + int nr_alloced = 0; + int random_index; + memset(alloced_pkeys, 0, sizeof(alloced_pkeys)); ++ srand((unsigned int)time(NULL)); + + /* allocate every possible key and make a note of which ones we got */ + max_nr_pkey_allocs = NR_PKEYS; +- max_nr_pkey_allocs = 1; + for (i = 0; i < max_nr_pkey_allocs; i++) { + int new_pkey = alloc_pkey(); + if (new_pkey < 0) diff --git a/patch/kernel/odroidxu4-legacy/patch-4.14.184-185.patch b/patch/kernel/odroidxu4-legacy/patch-4.14.184-185.patch new file mode 100644 index 000000000..de75e71d9 --- /dev/null +++ b/patch/kernel/odroidxu4-legacy/patch-4.14.184-185.patch @@ -0,0 +1,6754 @@ +diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +index b6a7e7397b8b..b944fe067188 100644 +--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt ++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +@@ -16,6 +16,9 @@ Required properties: + Documentation/devicetree/bindings/graph.txt. This port should be connected + to the input port of an attached HDMI or LVDS encoder chip. + ++Optional properties: ++- pinctrl-names: Contain "default" and "sleep". ++ + Example: + + dpi0: dpi@1401d000 { +@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { + <&mmsys CLK_MM_DPI_ENGINE>, + <&apmixedsys CLK_APMIXED_TVDPLL>; + clock-names = "pixel", "engine", "pll"; ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&dpi_pin_func>; ++ pinctrl-1 = <&dpi_pin_idle>; + + port { + dpi0_out: endpoint { +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt +index f67ed33d1054..81a8802cea88 100644 +--- a/Documentation/virtual/kvm/api.txt ++++ b/Documentation/virtual/kvm/api.txt +@@ -3737,9 +3737,11 @@ EOI was received. + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/Makefile b/Makefile +index ce607fe26228..04d63a6b4f46 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 184 ++SUBLEVEL = 185 + EXTRAVERSION = + NAME = Petit Gorille + +@@ -542,12 +542,8 @@ KBUILD_MODULES := + KBUILD_BUILTIN := 1 + + # If we have only "make modules", don't compile built-in objects. +-# When we're building modules with modversions, we need to consider +-# the built-in objects during the descend as well, in order to +-# make sure the checksums are up to date before we record them. +- + ifeq ($(MAKECMDGOALS),modules) +- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) ++ KBUILD_BUILTIN := + endif + + # If we have "make modules", compile modules +@@ -1249,6 +1245,13 @@ ifdef CONFIG_MODULES + + all: modules + ++# When we're building modules with modversions, we need to consider ++# the built-in objects during the descend as well, in order to ++# make sure the checksums are up to date before we record them. 
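
Back to the timestamping selftest change above: strncpy() does not NUL-terminate when it truncates, so the fix validates the name length against IFNAMSIZ first and then copies the already-terminated string with memcpy(). A minimal standalone version of that pattern, with "eth0" standing in for the user-supplied interface name:

#include <net/if.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *interface = "eth0";
        struct ifreq device;
        size_t if_len = strlen(interface);

        if (if_len >= IFNAMSIZ) {
                fprintf(stderr, "interface name exceeds IFNAMSIZ\n");
                return 1;
        }
        memset(&device, 0, sizeof(device));
        memcpy(device.ifr_name, interface, if_len + 1); /* include the NUL */
        printf("ifr_name = \"%s\"\n", device.ifr_name);
        return 0;
}
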
++ifdef CONFIG_MODVERSIONS ++ KBUILD_BUILTIN := 1 ++endif ++ + # Build modules + # + # A module can be listed more than once in obj-m resulting in +diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h +index 87d8c4f0307d..7295967b5028 100644 +--- a/arch/alpha/include/asm/uaccess.h ++++ b/arch/alpha/include/asm/uaccess.h +@@ -30,11 +30,13 @@ + * Address valid if: + * - "addr" doesn't have any high-bits set + * - AND "size" doesn't have any high-bits set +- * - AND "addr+size" doesn't have any high-bits set ++ * - AND "addr+size-(size != 0)" doesn't have any high-bits set + * - OR we are in kernel mode. + */ +-#define __access_ok(addr, size) \ +- ((get_fs().seg & (addr | size | (addr+size))) == 0) ++#define __access_ok(addr, size) ({ \ ++ unsigned long __ao_a = (addr), __ao_b = (size); \ ++ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ ++ (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) + + #define access_ok(type, addr, size) \ + ({ \ +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 58e3771e4c5b..368b4b404985 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -228,8 +228,8 @@ static struct undef_hook arm_break_hook = { + }; + + static struct undef_hook thumb_break_hook = { +- .instr_mask = 0xffff, +- .instr_val = 0xde01, ++ .instr_mask = 0xffffffff, ++ .instr_val = 0x0000de01, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = break_trap, +diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c +index 02e712d2ea30..bbc2926bd12b 100644 +--- a/arch/arm/mach-tegra/tegra.c ++++ b/arch/arm/mach-tegra/tegra.c +@@ -108,8 +108,8 @@ static const char * const tegra_dt_board_compat[] = { + }; + + DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") +- .l2c_aux_val = 0x3c400001, +- .l2c_aux_mask = 0xc20fc3fe, ++ .l2c_aux_val = 0x3c400000, ++ .l2c_aux_mask = 0xc20fc3ff, + .smp = smp_ops(tegra_smp_ops), + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index 5461d589a1e2..60ac7c5999a9 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -5,6 +5,7 @@ + * VMA_VM_FLAGS + * VM_EXEC + */ ++#include + #include + #include + +@@ -30,7 +31,7 @@ + * act_mm - get current->active_mm + */ + .macro act_mm, rd +- bic \rd, sp, #8128 ++ bic \rd, sp, #(THREAD_SIZE - 1) & ~63 + bic \rd, \rd, #63 + ldr \rd, [\rd, #TI_TASK] + .if (TSK_ACTIVE_MM > IMM12_MASK) +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index f982c9d1d10b..87615facf959 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -292,8 +292,10 @@ struct kvm_vcpu_arch { + * CP14 and CP15 live in the same array, as they are backed by the + * same system registers. 
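
A standalone illustration of the CPx_BIAS fix above: when an array of 64-bit registers is also read as an array of 32-bit words, the 32-bit index has to be XORed with 1 on a big-endian host to reach the same bytes it would on little-endian:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        union {
                uint64_t regs64[2];
                uint32_t regs32[4];
        } ctxt = { .regs64 = { 0, 0 } };
        const union { uint32_t word; uint8_t byte[4]; } probe = { .word = 1 };
        const unsigned int bias = (probe.byte[0] == 0); /* 1 on big-endian */

        ctxt.regs64[0] = 0xdeadbeefULL;         /* value lives in the low 32 bits */
        printf("32-bit view, index 0^bias: %#x (bias %u)\n",
               (unsigned int)ctxt.regs32[0 ^ bias], bias);
        return 0;
}
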
+ */ +-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) +-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) ++#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ++ ++#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) ++#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) + + #ifdef CONFIG_CPU_BIG_ENDIAN + #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) +diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h +index de1470c4d829..1149251ea58d 100644 +--- a/arch/m68k/include/asm/mac_via.h ++++ b/arch/m68k/include/asm/mac_via.h +@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping; + + struct irq_desc; + ++extern void via_l2_flush(int writeback); + extern void via_register_interrupts(void); + extern void via_irq_enable(int); + extern void via_irq_disable(int); +diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c +index 2004b3f72d80..3ea7450c51f2 100644 +--- a/arch/m68k/mac/config.c ++++ b/arch/m68k/mac/config.c +@@ -61,7 +61,6 @@ extern void iop_preinit(void); + extern void iop_init(void); + extern void via_init(void); + extern void via_init_clock(irq_handler_t func); +-extern void via_flush_cache(void); + extern void oss_init(void); + extern void psc_init(void); + extern void baboon_init(void); +@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) + return unknown; + } + +-/* +- * Flip into 24bit mode for an instant - flushes the L2 cache card. We +- * have to disable interrupts for this. Our IRQ handlers will crap +- * themselves if they take an IRQ in 24bit mode! +- */ +- +-static void mac_cache_card_flush(int writeback) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- via_flush_cache(); +- local_irq_restore(flags); +-} +- + void __init config_mac(void) + { + if (!MACH_IS_MAC) +@@ -179,9 +163,8 @@ void __init config_mac(void) + * not. + */ + +- if (macintosh_config->ident == MAC_MODEL_IICI +- || macintosh_config->ident == MAC_MODEL_IIFX) +- mach_l2_flush = mac_cache_card_flush; ++ if (macintosh_config->ident == MAC_MODEL_IICI) ++ mach_l2_flush = via_l2_flush; + } + + +diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c +index 863806e6775a..6ab6a1d54b37 100644 +--- a/arch/m68k/mac/via.c ++++ b/arch/m68k/mac/via.c +@@ -300,10 +300,14 @@ void via_debug_dump(void) + * the system into 24-bit mode for an instant. + */ + +-void via_flush_cache(void) ++void via_l2_flush(int writeback) + { ++ unsigned long flags; ++ ++ local_irq_save(flags); + via2[gBufB] &= ~VIA2B_vMode32; + via2[gBufB] |= VIA2B_vMode32; ++ local_irq_restore(flags); + } + + /* +diff --git a/arch/mips/Makefile b/arch/mips/Makefile +index 5977884b008e..a4a06d173858 100644 +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -279,12 +279,23 @@ ifdef CONFIG_64BIT + endif + endif + ++# When linking a 32-bit executable the LLVM linker cannot cope with a ++# 32-bit load address that has been sign-extended to 64 bits. Simply ++# remove the upper 32 bits then, as it is safe to do so with other ++# linkers. 
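
The load-ld comment above is easy to verify numerically: a KSEG0-style 32-bit link address becomes 0xffffffff... once sign-extended to 64 bits, and dropping the upper 32 bits recovers the value a 32-bit linker expects. The address below is an arbitrary example, not one taken from a platform Makefile:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t load32 = 0x80100000u;
        uint64_t sign_extended = (uint64_t)(int64_t)(int32_t)load32;

        printf("sign-extended: 0x%016" PRIx64 "\n", sign_extended);
        printf("lower 32 bits: 0x%08" PRIx64 "\n", sign_extended & 0xffffffffu);
        return 0;
}
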
++ifdef CONFIG_64BIT ++ load-ld = $(load-y) ++else ++ load-ld = $(subst 0xffffffff,0x,$(load-y)) ++endif ++ + KBUILD_AFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(cflags-y) +-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) ++KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) + KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) + + bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ ++ LINKER_LOAD_ADDRESS=$(load-ld) \ + VMLINUX_ENTRY_ADDRESS=$(entry-y) \ + PLATFORM="$(platform-y)" \ + ITS_INPUTS="$(its-y)" +diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile +index baa34e4deb78..516e593a8ee9 100644 +--- a/arch/mips/boot/compressed/Makefile ++++ b/arch/mips/boot/compressed/Makefile +@@ -87,7 +87,7 @@ ifneq ($(zload-y),) + VMLINUZ_LOAD_ADDRESS := $(zload-y) + else + VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ +- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) ++ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) + endif + UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS) + +diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig +index 324dfee23dfb..c871e40b8878 100644 +--- a/arch/mips/configs/loongson3_defconfig ++++ b/arch/mips/configs/loongson3_defconfig +@@ -250,7 +250,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y + CONFIG_MEDIA_USB_SUPPORT=y + CONFIG_USB_VIDEO_CLASS=m + CONFIG_DRM=y +-CONFIG_DRM_RADEON=y ++CONFIG_DRM_RADEON=m + CONFIG_FB_RADEON=y + CONFIG_LCD_CLASS_DEVICE=y + CONFIG_LCD_PLATFORM=m +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h +index 673049bf29b6..f21dd4cb33ad 100644 +--- a/arch/mips/include/asm/kvm_host.h ++++ b/arch/mips/include/asm/kvm_host.h +@@ -274,8 +274,12 @@ enum emulation_result { + #define MIPS3_PG_SHIFT 6 + #define MIPS3_PG_FRAME 0x3fffffc0 + ++#if defined(CONFIG_64BIT) ++#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) ++#else + #define VPN2_MASK 0xffffe000 +-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID ++#endif ++#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) + #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) + #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) + #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h +index a6810923b3f0..a7f9acb42034 100644 +--- a/arch/mips/include/asm/mipsregs.h ++++ b/arch/mips/include/asm/mipsregs.h +@@ -737,7 +737,7 @@ + + /* MAAR bit definitions */ + #define MIPS_MAAR_VH (_U64CAST_(1) << 63) +-#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) ++#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12) + #define MIPS_MAAR_ADDR_SHIFT 12 + #define MIPS_MAAR_S (_ULCAST_(1) << 1) + #define MIPS_MAAR_VL (_ULCAST_(1) << 0) +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S +index 37b9383eacd3..cf74a963839f 100644 +--- a/arch/mips/kernel/genex.S ++++ b/arch/mips/kernel/genex.S +@@ -431,20 +431,20 @@ NESTED(nmi_handler, PT_SIZE, sp) + .endm + + .macro __build_clear_fpe ++ CLI ++ TRACE_IRQS_OFF + .set push + /* gas fails to assemble cfc1 for some archs (octeon).*/ \ + .set mips1 + SET_HARDFLOAT + cfc1 a1, fcr31 + .set pop +- CLI +- TRACE_IRQS_OFF + .endm + + .macro __build_clear_msa_fpe +- _cfcmsa a1, MSA_CSR + CLI + TRACE_IRQS_OFF ++ _cfcmsa a1, MSA_CSR + .endm + + .macro __build_clear_ade +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c +index 7f3f136572de..50d3d74001cb 100644 +--- a/arch/mips/kernel/mips-cm.c ++++ b/arch/mips/kernel/mips-cm.c +@@ -123,9 
+123,9 @@ static char *cm2_causes[32] = { + "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", + "0x08", "0x09", "0x0a", "0x0b", + "0x0c", "0x0d", "0x0e", "0x0f", +- "0x10", "0x11", "0x12", "0x13", +- "0x14", "0x15", "0x16", "INTVN_WR_ERR", +- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", ++ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", ++ "0x14", "0x15", "0x16", "0x17", ++ "0x18", "0x19", "0x1a", "0x1b", + "0x1c", "0x1d", "0x1e", "0x1f" + }; + +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c +index 05ed4ed411c7..abd7ee9e90ab 100644 +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -911,7 +911,17 @@ static void __init arch_mem_init(char **cmdline_p) + BOOTMEM_DEFAULT); + #endif + device_tree_init(); ++ ++ /* ++ * In order to reduce the possibility of kernel panic when failed to ++ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate ++ * low memory as small as possible before plat_swiotlb_setup(), so ++ * make sparse_init() using top-down allocation. ++ */ ++ memblock_set_bottom_up(false); + sparse_init(); ++ memblock_set_bottom_up(true); ++ + plat_swiotlb_setup(); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c +index a6ebc8135112..df18f386d457 100644 +--- a/arch/mips/kernel/time.c ++++ b/arch/mips/kernel/time.c +@@ -22,12 +22,82 @@ + #include + #include + #include ++#include ++#include + + #include + #include + #include + #include + ++#ifdef CONFIG_CPU_FREQ ++ ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); ++static unsigned long glb_lpj_ref; ++static unsigned long glb_lpj_ref_freq; ++ ++static int cpufreq_callback(struct notifier_block *nb, ++ unsigned long val, void *data) ++{ ++ struct cpufreq_freqs *freq = data; ++ struct cpumask *cpus = freq->policy->cpus; ++ unsigned long lpj; ++ int cpu; ++ ++ /* ++ * Skip lpj numbers adjustment if the CPU-freq transition is safe for ++ * the loops delay. (Is this possible?) ++ */ ++ if (freq->flags & CPUFREQ_CONST_LOOPS) ++ return NOTIFY_OK; ++ ++ /* Save the initial values of the lpjes for future scaling. */ ++ if (!glb_lpj_ref) { ++ glb_lpj_ref = boot_cpu_data.udelay_val; ++ glb_lpj_ref_freq = freq->old; ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(pcp_lpj_ref, cpu) = ++ cpu_data[cpu].udelay_val; ++ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; ++ } ++ } ++ ++ /* ++ * Adjust global lpj variable and per-CPU udelay_val number in ++ * accordance with the new CPU frequency. ++ */ ++ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || ++ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { ++ loops_per_jiffy = cpufreq_scale(glb_lpj_ref, ++ glb_lpj_ref_freq, ++ freq->new); ++ ++ for_each_cpu(cpu, cpus) { ++ lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), ++ per_cpu(pcp_lpj_ref_freq, cpu), ++ freq->new); ++ cpu_data[cpu].udelay_val = (unsigned int)lpj; ++ } ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block cpufreq_notifier = { ++ .notifier_call = cpufreq_callback, ++}; ++ ++static int __init register_cpufreq_notifier(void) ++{ ++ return cpufreq_register_notifier(&cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++} ++core_initcall(register_cpufreq_notifier); ++ ++#endif /* CONFIG_CPU_FREQ */ ++ + /* + * forward reference + */ +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S +index 36f2e860ba3e..be63fff95b2a 100644 +--- a/arch/mips/kernel/vmlinux.lds.S ++++ b/arch/mips/kernel/vmlinux.lds.S +@@ -50,7 +50,7 @@ SECTIONS + /* . 
= 0xa800000000300000; */ + . = 0xffffffff80300000; + #endif +- . = VMLINUX_LOAD_ADDRESS; ++ . = LINKER_LOAD_ADDRESS; + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { +diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h +index bbf5c79cce7a..8b204cd1f531 100644 +--- a/arch/openrisc/include/asm/uaccess.h ++++ b/arch/openrisc/include/asm/uaccess.h +@@ -58,8 +58,12 @@ + /* Ensure that addr is below task's addr_limit */ + #define __addr_ok(addr) ((unsigned long) addr < get_fs()) + +-#define access_ok(type, addr, size) \ +- __range_ok((unsigned long)addr, (unsigned long)size) ++#define access_ok(type, addr, size) \ ++({ \ ++ unsigned long __ao_addr = (unsigned long)(addr); \ ++ unsigned long __ao_size = (unsigned long)(size); \ ++ __range_ok(__ao_addr, __ao_size); \ ++}) + + /* + * These are the main single-value transfer routines. They automatically +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c +index 7ed2b1b6643c..09134df01bfd 100644 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c +@@ -385,6 +385,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) + { + u64 lpcr; + ++ /* ++ * Linux relies on FSCR[DSCR] being clear, so that we can take the ++ * facility unavailable interrupt and track the task's usage of DSCR. ++ * See facility_unavailable_exception(). ++ * Clear the bit here so that feat_enable() doesn't set it. ++ */ ++ f->fscr_bit_nr = -1; ++ + feat_enable(f); + + lpcr = mfspr(SPRN_LPCR); +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c +index d96b28415090..bbe9c57dd1a3 100644 +--- a/arch/powerpc/kernel/prom.c ++++ b/arch/powerpc/kernel/prom.c +@@ -658,6 +658,23 @@ static void __init early_reserve_mem(void) + #endif + } + ++#ifdef CONFIG_PPC64 ++static void __init save_fscr_to_task(void) ++{ ++ /* ++ * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we ++ * have configured via the device tree features or via __init_FSCR(). ++ * That value will then be propagated to pid 1 (init) and all future ++ * processes. 
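
The alpha, openrisc and sh access_ok() changes earlier in this patch share two ideas: evaluate addr and size exactly once, and compute the last byte of the range as addr + size - (size != 0) so that a zero size and a range ending right at the limit are handled without overflow. A standalone version of that check (the kernel macros get single evaluation from GCC statement expressions; a plain helper function does the same job here, and the limit is an arbitrary example):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool range_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        unsigned long end = addr + size - (size != 0);

        return end >= addr && end < limit;      /* no wrap-around, below the limit */
}

int main(void)
{
        const unsigned long limit = 0x80000000ul;       /* illustrative TASK_SIZE */

        printf("%d\n", range_ok(0x7ffff000ul, 0x1000ul, limit));  /* 1: ends at limit - 1 */
        printf("%d\n", range_ok(0x7ffff000ul, 0x1001ul, limit));  /* 0: one byte too far  */
        printf("%d\n", range_ok(0x1000ul, ULONG_MAX, limit));     /* 0: addr + size wraps */
        return 0;
}
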
++ */ ++ if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) ++ init_task.thread.fscr = mfspr(SPRN_FSCR); ++} ++#else ++static inline void save_fscr_to_task(void) {}; ++#endif ++ ++ + void __init early_init_devtree(void *params) + { + phys_addr_t limit; +@@ -743,6 +760,8 @@ void __init early_init_devtree(void *params) + BUG(); + } + ++ save_fscr_to_task(); ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) + /* We'll later wait for secondaries to check in; there are + * NCPUS-1 non-boot CPUs :-) +diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index 5ffcdeb1eb17..9d9fffaedeef 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -1988,8 +1988,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, + static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { +- int ret; + struct spu_context *ctx = file->private_data; ++ u32 stat, data; ++ int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -1998,11 +1999,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_mbox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.prob.pu_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the mbox */ ++ if (!(stat & 0x0000ff)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_mbox_info_fops = { +@@ -2029,6 +2035,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ u32 stat, data; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2038,11 +2045,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_ibox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.priv2.puint_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the ibox */ ++ if (!(stat & 0xff0000)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_ibox_info_fops = { +@@ -2051,6 +2063,11 @@ static const struct file_operations spufs_ibox_info_fops = { + .llseek = generic_file_llseek, + }; + ++static size_t spufs_wbox_info_cnt(struct spu_context *ctx) ++{ ++ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); ++} ++ + static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { +@@ -2059,7 +2076,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + u32 wbox_stat; + + wbox_stat = ctx->csa.prob.mb_stat_R; +- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); ++ cnt = spufs_wbox_info_cnt(ctx); + for (i = 0; i < cnt; i++) { + data[i] = ctx->csa.spu_mailbox_data[i]; + } +@@ -2072,7 +2089,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; +- int ret; ++ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; ++ int ret, count; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -2081,11 +2099,13 @@ static 
ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_wbox_info_read(ctx, buf, len, pos); ++ count = spufs_wbox_info_cnt(ctx); ++ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &data, ++ count * sizeof(u32)); + } + + static const struct file_operations spufs_wbox_info_fops = { +@@ -2094,27 +2114,33 @@ static const struct file_operations spufs_wbox_info_fops = { + .llseek = generic_file_llseek, + }; + +-static ssize_t __spufs_dma_info_read(struct spu_context *ctx, +- char __user *buf, size_t len, loff_t *pos) ++static void spufs_get_dma_info(struct spu_context *ctx, ++ struct spu_dma_info *info) + { +- struct spu_dma_info info; +- struct mfc_cq_sr *qp, *spuqp; + int i; + +- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; +- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; +- info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; +- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; +- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; ++ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; ++ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; ++ info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; ++ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; ++ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + for (i = 0; i < 16; i++) { +- qp = &info.dma_info_command_data[i]; +- spuqp = &ctx->csa.priv2.spuq[i]; ++ struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; ++ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; + + qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; + } ++} ++ ++static ssize_t __spufs_dma_info_read(struct spu_context *ctx, ++ char __user *buf, size_t len, loff_t *pos) ++{ ++ struct spu_dma_info info; ++ ++ spufs_get_dma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2124,6 +2150,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_dma_info info; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2133,11 +2160,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_dma_info_read(ctx, buf, len, pos); ++ spufs_get_dma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_dma_info_fops = { +@@ -2146,13 +2174,31 @@ static const struct file_operations spufs_dma_info_fops = { + .llseek = no_llseek, + }; + ++static void spufs_get_proxydma_info(struct spu_context *ctx, ++ struct spu_proxydma_info *info) ++{ ++ int i; ++ ++ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; ++ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; ++ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; ++ ++ for (i = 0; i < 8; i++) { ++ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; ++ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; ++ ++ qp->mfc_cq_data0_RW = 
puqp->mfc_cq_data0_RW; ++ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; ++ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; ++ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; ++ } ++} ++ + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { + struct spu_proxydma_info info; +- struct mfc_cq_sr *qp, *puqp; + int ret = sizeof info; +- int i; + + if (len < ret) + return -EINVAL; +@@ -2160,18 +2206,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; + +- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; +- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; +- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; +- for (i = 0; i < 8; i++) { +- qp = &info.proxydma_info_command_data[i]; +- puqp = &ctx->csa.priv2.puq[i]; +- +- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; +- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; +- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; +- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; +- } ++ spufs_get_proxydma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2181,17 +2216,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_proxydma_info info; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_proxydma_info_read(ctx, buf, len, pos); ++ spufs_get_proxydma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_proxydma_info_fops = { +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c +index b7ae5a027714..f8181c8af32d 100644 +--- a/arch/powerpc/sysdev/xive/common.c ++++ b/arch/powerpc/sysdev/xive/common.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -932,12 +933,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); + void xive_cleanup_irq_data(struct xive_irq_data *xd) + { + if (xd->eoi_mmio) { ++ unmap_kernel_range((unsigned long)xd->eoi_mmio, ++ 1u << xd->esb_shift); + iounmap(xd->eoi_mmio); + if (xd->eoi_mmio == xd->trig_mmio) + xd->trig_mmio = NULL; + xd->eoi_mmio = NULL; + } + if (xd->trig_mmio) { ++ unmap_kernel_range((unsigned long)xd->trig_mmio, ++ 1u << xd->esb_shift); + iounmap(xd->trig_mmio); + xd->trig_mmio = NULL; + } +diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h +index 32eb56e00c11..6e7816360a75 100644 +--- a/arch/sh/include/asm/uaccess.h ++++ b/arch/sh/include/asm/uaccess.h +@@ -16,8 +16,11 @@ + * sum := addr + size; carry? 
--> flag = true; + * if (sum >= addr_limit) flag = true; + */ +-#define __access_ok(addr, size) \ +- (__addr_ok((addr) + (size))) ++#define __access_ok(addr, size) ({ \ ++ unsigned long __ao_a = (addr), __ao_b = (size); \ ++ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ ++ __ao_end >= __ao_a && __addr_ok(__ao_end); }) ++ + #define access_ok(type, addr, size) \ + (__chk_user_ptr(addr), \ + __access_ok((unsigned long __force)(addr), (size))) +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c +index 16b50afe7b52..60f7205ebe40 100644 +--- a/arch/sparc/kernel/ptrace_32.c ++++ b/arch/sparc/kernel/ptrace_32.c +@@ -46,82 +46,79 @@ enum sparc_regset { + REGSET_FP, + }; + ++static int regwindow32_get(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_from_user(uregs, (void __user *)reg_window, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int regwindow32_set(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_to_user((void __user *)reg_window, uregs, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE | FOLL_WRITE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ + static int genregs32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + const struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- unsigned long *k = kbuf; +- unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- *k++ = regs->u_regs[pos++]; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (put_user(regs->u_regs[pos++], u++)) +- return -EFAULT; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, ®_window[pos++]) || +- put_user(reg, u++)) +- return -EFAULT; +- } +- } +- while (count > 0) { +- switch (pos) { +- case 32: /* PSR */ +- reg = regs->psr; +- break; +- case 33: /* PC */ +- reg = regs->pc; +- break; +- case 34: /* NPC */ +- reg = regs->npc; +- break; +- case 35: /* Y */ +- reg = regs->y; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- reg = 0; +- break; +- default: +- goto finish; +- } ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- *k++ = reg; +- else if (put_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- pos++; +- count--; ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + } +-finish: +- pos *= sizeof(reg); +- 
count *= sizeof(reg); + +- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ uregs[0] = regs->psr; ++ uregs[1] = regs->pc; ++ uregs[2] = regs->npc; ++ uregs[3] = regs->y; ++ uregs[4] = 0; /* WIM */ ++ uregs[5] = 0; /* TBR */ ++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 32 * sizeof(u32), 38 * sizeof(u32)); + } + + static int genregs32_set(struct task_struct *target, +@@ -130,82 +127,53 @@ static int genregs32_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- const unsigned long *k = kbuf; +- const unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ u32 psr; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- regs->u_regs[pos++] = *k++; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (put_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (get_user(reg, u++)) +- return -EFAULT; +- regs->u_regs[pos++] = reg; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, u++) || +- put_user(reg, ®_window[pos++])) +- return -EFAULT; +- } +- } +- while (count > 0) { +- unsigned long psr; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- reg = *k++; +- else if (get_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- +- switch (pos) { +- case 32: /* PSR */ +- psr = regs->psr; +- psr &= ~(PSR_ICC | PSR_SYSCALL); +- psr |= (reg & (PSR_ICC | PSR_SYSCALL)); +- regs->psr = psr; +- break; +- case 33: /* PC */ +- regs->pc = reg; +- break; +- case 34: /* NPC */ +- regs->npc = reg; +- break; +- case 35: /* Y */ +- regs->y = reg; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- break; +- default: +- goto finish; +- } +- +- pos++; +- count--; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret) ++ return ret; ++ if (regwindow32_set(target, regs, uregs)) ++ return -EFAULT; ++ if (!count) ++ return 0; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); +- ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ &psr, ++ 32 * sizeof(u32), 33 * sizeof(u32)); ++ if (ret) ++ return ret; ++ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | ++ (psr & (PSR_ICC | PSR_SYSCALL)); ++ if (!count) ++ return 0; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->pc, ++ 33 * sizeof(u32), 34 * sizeof(u32)); ++ if (ret || !count) ++ return ret; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->y, ++ 34 * sizeof(u32), 35 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ 35 * sizeof(u32), 38 * sizeof(u32)); + } + + static int fpregs32_get(struct task_struct *target, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index e1d965e90e16..0c478c85e380 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -571,19 +571,13 @@ static int 
genregs32_get(struct task_struct *target, + for (; count > 0 && pos < 32; count--) { + if (access_process_vm(target, + (unsigned long) +- ®_window[pos], ++ ®_window[pos++], + ®, sizeof(reg), + FOLL_FORCE) + != sizeof(reg)) + return -EFAULT; +- if (access_process_vm(target, +- (unsigned long) u, +- ®, sizeof(reg), +- FOLL_FORCE | FOLL_WRITE) +- != sizeof(reg)) ++ if (put_user(reg, u++)) + return -EFAULT; +- pos++; +- u++; + } + } + } +@@ -683,12 +677,7 @@ static int genregs32_set(struct task_struct *target, + } + } else { + for (; count > 0 && pos < 32; count--) { +- if (access_process_vm(target, +- (unsigned long) +- u, +- ®, sizeof(reg), +- FOLL_FORCE) +- != sizeof(reg)) ++ if (get_user(reg, u++)) + return -EFAULT; + if (access_process_vm(target, + (unsigned long) +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 01d628ea3402..c6c4b877f3d2 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -49,16 +49,17 @@ + * Position Independent Executable (PIE) so that linker won't optimize + * R_386_GOT32X relocation to its fixed symbol address. Older + * linkers generate R_386_32 relocations against locally defined symbols, +- * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less ++ * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less + * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle + * R_386_32 relocations when relocating the kernel. To generate +- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as ++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as + * hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + ENTRY(startup_32) +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index a25127916e67..7ab1c6bcc66a 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -41,6 +41,7 @@ + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + .code32 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 764cbf1774d9..e08866cd2287 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -291,6 +291,7 @@ + #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ + #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ + #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ + #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
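
The sparc ptrace changes above pick between a direct copy and access_process_vm() depending on whether the traced task is the current one. A rough userspace analogue of that split, using process_vm_readv() (glibc 2.15 or later) for the remote case; the register-window array here is just sample data:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

static int read_words(pid_t target, const void *remote, unsigned int *buf, size_t size)
{
        if (target == getpid()) {               /* current task: copy directly */
                memcpy(buf, remote, size);
                return 0;
        }
        struct iovec local_iov  = { .iov_base = buf, .iov_len = size };
        struct iovec remote_iov = { .iov_base = (void *)remote, .iov_len = size };

        return process_vm_readv(target, &local_iov, 1, &remote_iov, 1, 0) == (ssize_t)size ? 0 : -1;
}

int main(void)
{
        unsigned int window[16] = { [0] = 0xdead, [15] = 0xbeef };
        unsigned int copy[16];

        if (read_words(getpid(), window, copy, sizeof(copy)) == 0)
                printf("copy[0] = %#x, copy[15] = %#x\n", copy[0], copy[15]);
        return 0;
}
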
*/ +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index b73a16a56e4f..041d2a04be1d 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -232,6 +232,7 @@ enum spectre_v2_mitigation { + enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE, + SPECTRE_V2_USER_STRICT, ++ SPECTRE_V2_USER_STRICT_PREFERRED, + SPECTRE_V2_USER_PRCTL, + SPECTRE_V2_USER_SECCOMP, + }; +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 971830341061..82b0ff6cac97 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -711,7 +711,17 @@ extern struct movsl_mask { + * checking before using them, but you have to surround them with the + * user_access_begin/end() pair. + */ +-#define user_access_begin() __uaccess_begin() ++static __must_check inline bool user_access_begin(int type, ++ const void __user *ptr, ++ size_t len) ++{ ++ if (unlikely(!access_ok(type, ptr, len))) ++ return 0; ++ __uaccess_begin_nospec(); ++ return 1; ++} ++ ++#define user_access_begin(a, b, c) user_access_begin(a, b, c) + #define user_access_end() __uaccess_end() + + #define unsafe_put_user(x, ptr, err_label) \ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 1de9a3c404af..245184152892 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -61,7 +61,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; + u64 __ro_after_init x86_amd_ls_cfg_base; + u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; + +-/* Control conditional STIPB in switch_to() */ ++/* Control conditional STIBP in switch_to() */ + DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); + /* Control conditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +@@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = ++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = ++ SPECTRE_V2_USER_NONE; ++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = + SPECTRE_V2_USER_NONE; + + #ifdef CONFIG_RETPOLINE +@@ -633,10 +635,11 @@ enum spectre_v2_user_cmd { + }; + + static const char * const spectre_v2_user_strings[] = { +- [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", +- [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", +- [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", +- [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", ++ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", ++ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", ++ [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", ++ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", ++ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", + }; + + static const struct { +@@ -748,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? 
+ "always-on" : "conditional"); ++ ++ spectre_v2_user_ibpb = mode; + } + +- /* If enhanced IBRS is enabled no STIPB required */ +- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) ++ /* ++ * If enhanced IBRS is enabled or SMT impossible, STIBP is not ++ * required. ++ */ ++ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* +- * If SMT is not possible or STIBP is not available clear the STIPB +- * mode. ++ * At this point, an STIBP mode other than "off" has been set. ++ * If STIBP support is not being forced, check if STIBP always-on ++ * is preferred. + */ +- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) ++ if (mode != SPECTRE_V2_USER_STRICT && ++ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) ++ mode = SPECTRE_V2_USER_STRICT_PREFERRED; ++ ++ /* ++ * If STIBP is not available, clear the STIBP mode. ++ */ ++ if (!boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; ++ ++ spectre_v2_user_stibp = mode; ++ + set_mode: +- spectre_v2_user = mode; +- /* Only print the STIBP mode when SMT possible */ +- if (smt_possible) +- pr_info("%s\n", spectre_v2_user_strings[mode]); ++ pr_info("%s\n", spectre_v2_user_strings[mode]); + } + + static const char * const spectre_v2_strings[] = { +@@ -995,10 +1011,11 @@ void arch_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: ++ case SPECTRE_V2_USER_STRICT_PREFERRED: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: +@@ -1227,13 +1244,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + { + switch (ctrl) { + case PR_SPEC_ENABLE: +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict +- * mode. ++ * mode. It can neither be enabled if it was force-disabled ++ * by a previous prctl call. ++ + */ +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || ++ task_spec_ib_force_disable(task)) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); +@@ -1244,9 +1267,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + * Indirect branch speculation is always allowed when + * mitigation is force disabled. 
+ */ +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return -EPERM; +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) +@@ -1277,7 +1303,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) + { + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); + } + #endif +@@ -1306,21 +1333,24 @@ static int ib_prctl_get(struct task_struct *task) + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + +- switch (spectre_v2_user) { +- case SPECTRE_V2_USER_NONE: ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_PRCTL: +- case SPECTRE_V2_USER_SECCOMP: ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) ++ return PR_SPEC_DISABLE; ++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_STRICT: +- return PR_SPEC_DISABLE; +- default: ++ } else + return PR_SPEC_NOT_AFFECTED; +- } + } + + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +@@ -1559,11 +1589,13 @@ static char *stibp_state(void) + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; ++ case SPECTRE_V2_USER_STRICT_PREFERRED: ++ return ", STIBP: always-on"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index d2ef967bfafb..a07b09f68e7e 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -414,28 +414,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + +- /* +- * If TIF_SSBD is different, select the proper mitigation +- * method. Note that if SSBD mitigation is disabled or permanentely +- * enabled this branch can't be taken because nothing can set +- * TIF_SSBD. +- */ +- if (tif_diff & _TIF_SSBD) { +- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ /* Handle change of TIF_SSBD depending on the mitigation method. 
*/ ++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_ssb_virt_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_core_ssb_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) { +- msr |= ssbd_tif_to_spec_ctrl(tifn); +- updmsr = true; +- } ++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || ++ static_cpu_has(X86_FEATURE_AMD_SSBD)) { ++ updmsr |= !!(tif_diff & _TIF_SSBD); ++ msr |= ssbd_tif_to_spec_ctrl(tifn); + } + +- /* +- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, +- * otherwise avoid the MSR write. +- */ ++ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); +diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h +index 898e97cf6629..320ab978fb1f 100644 +--- a/arch/x86/kernel/process.h ++++ b/arch/x86/kernel/process.h +@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev, + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional +- * STIPB is disabled and the only different bit is ++ * STIBP is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. + */ +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index c663d5fcff2e..b7663a1f89ee 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), + }, + }, ++ { /* Handle problems with rebooting on Apple MacBook6,1 */ ++ .callback = set_pci_reboot, ++ .ident = "Apple MacBook6,1", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), ++ }, ++ }, + { /* Handle problems with rebooting on Apple MacBookPro5 */ + .callback = set_pci_reboot, + .ident = "Apple MacBookPro5", +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index ab0176ae985b..12f90f17f4f6 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -24,10 +24,6 @@ + #include + #include + +-#ifdef CONFIG_X86_64 +-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; +-#endif +- + unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index 2384a2ae5ec3..8d8e33b720b4 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) + #ifdef CONFIG_X86_32 + OUTPUT_ARCH(i386) + ENTRY(phys_startup_32) +-jiffies = jiffies_64; + #else + OUTPUT_ARCH(i386:x86-64) + ENTRY(phys_startup_64) +-jiffies_64 = jiffies; + #endif + ++jiffies = jiffies_64; ++ + #if defined(CONFIG_X86_64) + /* + * On 64-bit, align RODATA to 2MB so we retain large page mappings for +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index e5af08b58132..7220ab210dcf 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -275,11 +275,18 @@ static bool is_executable_pte(u64 spte); + void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value) + { + BUG_ON((mmio_mask & mmio_value) != mmio_value); ++ 
WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); ++ WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); + shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK; + shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; + } + EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); + ++static bool is_mmio_spte(u64 spte) ++{ ++ return (spte & shadow_mmio_mask) == shadow_mmio_value; ++} ++ + static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) + { + return sp->role.ad_disabled; +@@ -287,7 +294,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) + + static inline bool spte_ad_enabled(u64 spte) + { +- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); ++ MMU_WARN_ON(is_mmio_spte(spte)); + return !(spte & shadow_acc_track_value); + } + +@@ -298,13 +305,13 @@ static bool is_nx_huge_page_enabled(void) + + static inline u64 spte_shadow_accessed_mask(u64 spte) + { +- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); ++ MMU_WARN_ON(is_mmio_spte(spte)); + return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; + } + + static inline u64 spte_shadow_dirty_mask(u64 spte) + { +- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); ++ MMU_WARN_ON(is_mmio_spte(spte)); + return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; + } + +@@ -374,11 +381,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, + mmu_spte_set(sptep, mask); + } + +-static bool is_mmio_spte(u64 spte) +-{ +- return (spte & shadow_mmio_mask) == shadow_mmio_value; +-} +- + static gfn_t get_mmio_spte_gfn(u64 spte) + { + u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; +@@ -460,16 +462,23 @@ static void kvm_mmu_reset_all_pte_masks(void) + * If the CPU has 46 or less physical address bits, then set an + * appropriate mask to guard against L1TF attacks. Otherwise, it is + * assumed that the CPU is not vulnerable to L1TF. ++ * ++ * Some Intel CPUs address the L1 cache using more PA bits than are ++ * reported by CPUID. Use the PA width of the L1 cache when possible ++ * to achieve more effective mitigation, e.g. if system RAM overlaps ++ * the most significant bits of legal physical address space. 
+ */ ++ shadow_nonpresent_or_rsvd_mask = 0; + low_phys_bits = boot_cpu_data.x86_phys_bits; +- if (boot_cpu_data.x86_phys_bits < +- 52 - shadow_nonpresent_or_rsvd_mask_len) { ++ if (boot_cpu_has_bug(X86_BUG_L1TF) && ++ !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= ++ 52 - shadow_nonpresent_or_rsvd_mask_len)) { ++ low_phys_bits = boot_cpu_data.x86_cache_bits ++ - shadow_nonpresent_or_rsvd_mask_len; + shadow_nonpresent_or_rsvd_mask = +- rsvd_bits(boot_cpu_data.x86_phys_bits - +- shadow_nonpresent_or_rsvd_mask_len, +- boot_cpu_data.x86_phys_bits - 1); +- low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; ++ rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); + } ++ + shadow_nonpresent_or_rsvd_lower_gfn_mask = + GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); + } +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index d63621386418..78826d123fb8 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -2757,8 +2757,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) + return NESTED_EXIT_HOST; + break; + case SVM_EXIT_EXCP_BASE + PF_VECTOR: +- /* When we're shadowing, trap PFs, but not async PF */ +- if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) ++ /* Trap async PF even if not shadowing */ ++ if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) + return NESTED_EXIT_HOST; + break; + default: +@@ -2847,7 +2847,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr + dst->iopm_base_pa = from->iopm_base_pa; + dst->msrpm_base_pa = from->msrpm_base_pa; + dst->tsc_offset = from->tsc_offset; +- dst->asid = from->asid; ++ /* asid not copied, it is handled manually for svm->vmcb. */ + dst->tlb_ctl = from->tlb_ctl; + dst->int_ctl = from->int_ctl; + dst->int_vector = from->int_vector; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 133b9b25e7c5..42c6ca05a613 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8711,7 +8711,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + KVM_ISA_VMX); + +- switch (exit_reason) { ++ switch ((u16)exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (is_nmi(intr_info)) + return false; +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 32bb38f6fc18..8039a951db8f 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -112,8 +112,6 @@ __ref void *alloc_low_pages(unsigned int num) + } else { + pfn = pgt_buf_end; + pgt_buf_end += num; +- printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", +- pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); + } + + for (i = 0; i < num; i++) { +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index 33e9b4f1ce20..c177da94fc79 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); + + /* + * Device [1022:7808] +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 9d53f476c517..cf56bdad2e06 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ 
-2738,6 +2738,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_freeze_queue(q); ++ /* ++ * Sync with blk_mq_queue_tag_busy_iter. ++ */ ++ synchronize_rcu(); + + set->nr_hw_queues = nr_hw_queues; + blk_mq_update_queue_map(set); +@@ -2748,10 +2752,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_unfreeze_queue(q); +- /* +- * Sync with blk_mq_queue_tag_busy_iter. +- */ +- synchronize_rcu(); + } + + void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 7bf1948b1223..732549ee1fe3 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -800,6 +800,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + "acpi_cppc"); + if (ret) { + per_cpu(cpc_desc_ptr, pr->id) = NULL; ++ kobject_put(&cpc_ptr->kobj); + goto out_free; + } + +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index afb1bc104a6f..6681174caf84 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -172,7 +172,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; +- /* If _PR3 is not available, use D3hot as the target state. */ ++ /* If D3cold is not supported, use D3hot as the target state. */ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { +diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c +index 46f060356a22..339e6d3dba7c 100644 +--- a/drivers/acpi/evged.c ++++ b/drivers/acpi/evged.c +@@ -82,6 +82,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + struct resource r; + struct acpi_resource_irq *p = &ares->data.irq; + struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; ++ char ev_name[5]; ++ u8 trigger; + + if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) + return AE_OK; +@@ -90,14 +92,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + dev_err(dev, "unable to parse IRQ resource\n"); + return AE_ERROR; + } +- if (ares->type == ACPI_RESOURCE_TYPE_IRQ) ++ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { + gsi = p->interrupts[0]; +- else ++ trigger = p->triggering; ++ } else { + gsi = pext->interrupts[0]; ++ trigger = pext->triggering; ++ } + + irq = r.start; + +- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { ++ switch (gsi) { ++ case 0 ... 255: ++ sprintf(ev_name, "_%c%02hhX", ++ trigger == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', gsi); ++ ++ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) ++ break; ++ /* fall through */ ++ default: ++ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) ++ break; ++ + dev_err(dev, "cannot locate _EVT method\n"); + return AE_ERROR; + } +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 2eddbb1fae6a..8bc1a778b3a4 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -920,12 +920,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) + + if (buffer.length && package + && package->type == ACPI_TYPE_PACKAGE +- && package->package.count) { +- int err = acpi_extract_power_resources(package, 0, +- &ps->resources); +- if (!err) +- device->power.flags.power_resources = 1; +- } ++ && package->package.count) ++ acpi_extract_power_resources(package, 0, &ps->resources); ++ + ACPI_FREE(buffer.pointer); + } + +@@ -972,14 +969,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) + acpi_bus_init_power_state(device, i); + + INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); +- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) +- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + +- /* Set defaults for D0 and D3hot states (always valid) */ ++ /* Set the defaults for D0 and D3hot (always supported). */ + device->power.states[ACPI_STATE_D0].flags.valid = 1; + device->power.states[ACPI_STATE_D0].power = 100; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + ++ /* ++ * Use power resources only if the D0 list of them is populated, because ++ * some platforms may provide _PR3 only to indicate D3cold support and ++ * in those cases the power resources list returned by it may be bogus. ++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { ++ device->power.flags.power_resources = 1; ++ /* ++ * D3cold is supported if the D3hot list of power resources is ++ * not empty. 
++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) ++ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; ++ } ++ + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + } +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index 0fd57bf33524..a663014cfa52 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -997,8 +997,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, + + error = kobject_init_and_add(&hotplug->kobj, + &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); +- if (error) ++ if (error) { ++ kobject_put(&hotplug->kobj); + goto err_out; ++ } + + kobject_uevent(&hotplug->kobj, KOBJ_ADD); + return; +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index dde7caac7f9f..7516ba981b63 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr, + unsigned int flags) + { + intel_private.driver->write_entry(addr, pg, flags); ++ readl(intel_private.gtt + pg); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, + j++; + } + } +- wmb(); ++ readl(intel_private.gtt + j - 1); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void) + + static void i9xx_chipset_flush(void) + { ++ wmb(); + if (intel_private.i9xx_flush_page) + writel(1, intel_private.i9xx_flush_page); + } +diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c +index 1f5f734e4919..a018199575e3 100644 +--- a/drivers/clocksource/dw_apb_timer.c ++++ b/drivers/clocksource/dw_apb_timer.c +@@ -225,7 +225,8 @@ static int apbt_next_event(unsigned long delta, + /** + * dw_apb_clockevent_init() - use an APB timer as a clock_event_device + * +- * @cpu: The CPU the events will be targeted at. ++ * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation ++ * isn't required. + * @name: The name used for the timer and the IRQ for it. + * @rating: The rating to give the timer. + * @base: I/O base for the timer registers. +@@ -260,7 +261,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, + dw_ced->ced.max_delta_ticks = 0x7fffffff; + dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); + dw_ced->ced.min_delta_ticks = 5000; +- dw_ced->ced.cpumask = cpumask_of(cpu); ++ dw_ced->ced.cpumask = cpu < 0 ? 
cpu_possible_mask : cpumask_of(cpu); + dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; + dw_ced->ced.set_state_shutdown = apbt_shutdown; +diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c +index 69866cd8f4bb..3e4d0e5733d3 100644 +--- a/drivers/clocksource/dw_apb_timer_of.c ++++ b/drivers/clocksource/dw_apb_timer_of.c +@@ -146,10 +146,6 @@ static int num_called; + static int __init dw_apb_timer_init(struct device_node *timer) + { + switch (num_called) { +- case 0: +- pr_debug("%s: found clockevent timer\n", __func__); +- add_clockevent(timer); +- break; + case 1: + pr_debug("%s: found clocksource timer\n", __func__); + add_clocksource(timer); +@@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) + #endif + break; + default: ++ pr_debug("%s: found clockevent timer\n", __func__); ++ add_clockevent(timer); + break; + } + +diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c +index ae948b1da93a..909bd2255978 100644 +--- a/drivers/cpuidle/sysfs.c ++++ b/drivers/cpuidle/sysfs.c +@@ -414,7 +414,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) + ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, + &kdev->kobj, "state%d", i); + if (ret) { +- kfree(kobj); ++ kobject_put(&kobj->kobj); + goto error_state; + } + kobject_uevent(&kobj->kobj, KOBJ_ADD); +@@ -544,7 +544,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) + ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, + &kdev->kobj, "driver"); + if (ret) { +- kfree(kdrv); ++ kobject_put(&kdrv->kobj); + return ret; + } + +@@ -638,7 +638,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) + error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, + "cpuidle"); + if (error) { +- kfree(kdev); ++ kobject_put(&kdev->kobj); + return error; + } + +diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c +index fee7cb2ce747..a81f3c7e941d 100644 +--- a/drivers/crypto/cavium/nitrox/nitrox_main.c ++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c +@@ -183,7 +183,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) + + struct nitrox_device *nitrox_get_first_device(void) + { +- struct nitrox_device *ndev = NULL; ++ struct nitrox_device *ndev; + + mutex_lock(&devlist_lock); + list_for_each_entry(ndev, &ndevlist, list) { +@@ -191,7 +191,7 @@ struct nitrox_device *nitrox_get_first_device(void) + break; + } + mutex_unlock(&devlist_lock); +- if (!ndev) ++ if (&ndev->list == &ndevlist) + return NULL; + + refcount_inc(&ndev->refcnt); +diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig +index 6d626606b9c5..898dcf3200c3 100644 +--- a/drivers/crypto/ccp/Kconfig ++++ b/drivers/crypto/ccp/Kconfig +@@ -8,10 +8,9 @@ config CRYPTO_DEV_CCP_DD + config CRYPTO_DEV_SP_CCP + bool "Cryptographic Coprocessor device" + default y +- depends on CRYPTO_DEV_CCP_DD ++ depends on CRYPTO_DEV_CCP_DD && DMADEVICES + select HW_RANDOM + select DMA_ENGINE +- select DMADEVICES + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help +diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c +index 8d39f3a07bf8..99c3827855c7 100644 +--- a/drivers/crypto/chelsio/chcr_algo.c ++++ b/drivers/crypto/chelsio/chcr_algo.c +@@ -2201,7 +2201,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, + unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; + unsigned int c_id = 
chcrctx->dev->rx_channel_id; + unsigned int ccm_xtra; +- unsigned char tag_offset = 0, auth_offset = 0; ++ unsigned int tag_offset = 0, auth_offset = 0; + unsigned int assoclen; + + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index fef2b306cdee..6c8a03a1132f 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -2636,7 +2636,6 @@ static struct talitos_alg_template driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +- .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | +@@ -2670,6 +2669,7 @@ static struct talitos_alg_template driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, + } + }, +diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c +index e2231a1a05a1..e6b889ce395e 100644 +--- a/drivers/crypto/virtio/virtio_crypto_algs.c ++++ b/drivers/crypto/virtio/virtio_crypto_algs.c +@@ -354,13 +354,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + int err; + unsigned long flags; + struct scatterlist outhdr, iv_sg, status_sg, **sgs; +- int i; + u64 dst_len; + unsigned int num_out = 0, num_in = 0; + int sg_total; + uint8_t *iv; ++ struct scatterlist *sg; + + src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (src_nents < 0) { ++ pr_err("Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ + dst_nents = sg_nents(req->dst); + + pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", +@@ -406,6 +411,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + goto free; + } + ++ dst_len = min_t(unsigned int, req->nbytes, dst_len); + pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", + req->nbytes, dst_len); + +@@ -441,12 +447,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, + vc_sym_req->iv = iv; + + /* Source data */ +- for (i = 0; i < src_nents; i++) +- sgs[num_out++] = &req->src[i]; ++ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) ++ sgs[num_out++] = sg; + + /* Destination data */ +- for (i = 0; i < dst_nents; i++) +- sgs[num_out + num_in++] = &req->dst[i]; ++ for (sg = req->dst; sg; sg = sg_next(sg)) ++ sgs[num_out + num_in++] = sg; + + /* Status */ + sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); +@@ -569,10 +575,11 @@ static void virtio_crypto_ablkcipher_finalize_req( + struct ablkcipher_request *req, + int err) + { +- crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, +- req, err); + kzfree(vc_sym_req->iv); + virtcrypto_clear_request(&vc_sym_req->base); ++ ++ crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, ++ req, err); + } + + static struct crypto_alg virtio_crypto_algs[] = { { +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 1c65f5ac4368..6529addd1e82 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + NULL, "%s", short_name); + kfree(short_name); +- if (ret) ++ if (ret) { ++ kobject_put(&new_var->kobj); + return ret; ++ } + + kobject_uevent(&new_var->kobj, KOBJ_ADD); + if (efivar_entry_add(new_var, 
&efivar_sysfs_list)) { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index 4894d8a87c04..ae23f7e0290c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -728,7 +728,6 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) + + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + drm_kms_helper_poll_disable(drm_dev); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); + + ret = amdgpu_device_suspend(drm_dev, false, false); + pci_save_state(pdev); +@@ -765,7 +764,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) + + ret = amdgpu_device_resume(drm_dev, false, false); + drm_kms_helper_poll_enable(drm_dev); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + return 0; + } +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +index 67469c26bae8..45a027d7a1e4 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +@@ -20,13 +20,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, + { + switch (fs) { + case 32000: +- *n = 4096; ++ case 48000: ++ case 96000: ++ case 192000: ++ *n = fs * 128 / 1000; + break; + case 44100: +- *n = 6272; +- break; +- case 48000: +- *n = 6144; ++ case 88200: ++ case 176400: ++ *n = fs * 128 / 900; + break; + } + +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index d99d05a91032..bf13299ebb55 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -1566,7 +1566,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) + * happened we would make the mistake of assuming that the + * relocations were valid. + */ +- user_access_begin(); ++ if (!user_access_begin(VERIFY_WRITE, urelocs, size)) ++ goto end_user; ++ + for (copied = 0; copied < nreloc; copied++) + unsafe_put_user(-1, + &urelocs[copied].presumed_offset, +@@ -2601,6 +2603,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list; + struct drm_syncobj **fences = NULL; ++ const size_t count = args->buffer_count; + int err; + + if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) { +@@ -2649,7 +2652,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, + unsigned int i; + + /* Copy the new buffer offsets back to the user's exec list. */ +- user_access_begin(); ++ /* ++ * Note: count * sizeof(*user_exec_list) does not overflow, ++ * because we checked 'count' in check_buffer_count(). ++ * ++ * And this range already got effectively checked earlier ++ * when we did the "copy_from_user()" above. 
++ */ ++ if (!user_access_begin(VERIFY_WRITE, user_exec_list, ++ count * sizeof(*user_exec_list))) ++ goto end_user; ++ + for (i = 0; i < args->buffer_count; i++) { + if (!(exec2_list[i].offset & UPDATE)) + continue; +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index 70a8d0b0c4f1..d00524a5d7f0 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -754,7 +754,6 @@ nouveau_pmops_runtime_suspend(struct device *dev) + } + + drm_kms_helper_poll_disable(drm_dev); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); + nouveau_switcheroo_optimus_dsm(); + ret = nouveau_do_suspend(drm_dev, true); + pci_save_state(pdev); +@@ -789,7 +788,6 @@ nouveau_pmops_runtime_resume(struct device *dev) + + /* do magic */ + nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + + /* Monitors may have been connected / disconnected during suspend */ +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index f4becad0a78c..f6908e2f9e55 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -424,7 +424,6 @@ static int radeon_pmops_runtime_suspend(struct device *dev) + + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + drm_kms_helper_poll_disable(drm_dev); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); + + ret = radeon_suspend_kms(drm_dev, false, false, false); + pci_save_state(pdev); +@@ -461,7 +460,6 @@ static int radeon_pmops_runtime_resume(struct device *dev) + + ret = radeon_resume_kms(drm_dev, false, false); + drm_kms_helper_poll_enable(drm_dev); +- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + return 0; + } +diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c +index 3cd153c6d271..f188c85b3b7a 100644 +--- a/drivers/gpu/vga/vga_switcheroo.c ++++ b/drivers/gpu/vga/vga_switcheroo.c +@@ -92,7 +92,8 @@ + * struct vga_switcheroo_client - registered client + * @pdev: client pci device + * @fb_info: framebuffer to which console is remapped on switching +- * @pwr_state: current power state ++ * @pwr_state: current power state if manual power control is used. ++ * For driver power control, call vga_switcheroo_pwr_state(). + * @ops: client callbacks + * @id: client identifier. Determining the id requires the handler, + * so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID +@@ -104,8 +105,7 @@ + * @list: client list + * + * Registered client. A client can be either a GPU or an audio device on a GPU. +- * For audio clients, the @fb_info, @active and @driver_power_control members +- * are bogus. ++ * For audio clients, the @fb_info and @active members are bogus. + */ + struct vga_switcheroo_client { + struct pci_dev *pdev; +@@ -331,8 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client); + * @ops: client callbacks + * @id: client identifier + * +- * Register audio client (audio device on a GPU). The power state of the +- * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer() ++ * Register audio client (audio device on a GPU). The client is assumed ++ * to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer() + * shall be called to ensure that all prerequisites are met. + * + * Return: 0 on success, -ENOMEM on memory allocation error. 
+@@ -341,7 +341,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, + const struct vga_switcheroo_client_ops *ops, + enum vga_switcheroo_client_id id) + { +- return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false); ++ return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true); + } + EXPORT_SYMBOL(vga_switcheroo_register_audio_client); + +@@ -406,6 +406,19 @@ bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) + } + EXPORT_SYMBOL(vga_switcheroo_client_probe_defer); + ++static enum vga_switcheroo_state ++vga_switcheroo_pwr_state(struct vga_switcheroo_client *client) ++{ ++ if (client->driver_power_control) ++ if (pm_runtime_enabled(&client->pdev->dev) && ++ pm_runtime_active(&client->pdev->dev)) ++ return VGA_SWITCHEROO_ON; ++ else ++ return VGA_SWITCHEROO_OFF; ++ else ++ return client->pwr_state; ++} ++ + /** + * vga_switcheroo_get_client_state() - obtain power state of a given client + * @pdev: client pci device +@@ -425,7 +438,7 @@ enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev) + if (!client) + ret = VGA_SWITCHEROO_NOT_FOUND; + else +- ret = client->pwr_state; ++ ret = vga_switcheroo_pwr_state(client); + mutex_unlock(&vgasr_mutex); + return ret; + } +@@ -598,7 +611,7 @@ static int vga_switcheroo_show(struct seq_file *m, void *v) + client_is_vga(client) ? "" : "-Audio", + client->active ? '+' : ' ', + client->driver_power_control ? "Dyn" : "", +- client->pwr_state ? "Pwr" : "Off", ++ vga_switcheroo_pwr_state(client) ? "Pwr" : "Off", + pci_name(client->pdev)); + i++; + } +@@ -641,10 +654,8 @@ static void set_audio_state(enum vga_switcheroo_client_id id, + struct vga_switcheroo_client *client; + + client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO); +- if (client && client->pwr_state != state) { ++ if (client) + client->ops->set_gpu_state(client->pdev, state); +- client->pwr_state = state; +- } + } + + /* stage one happens before delay */ +@@ -656,7 +667,7 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) + if (!active) + return 0; + +- if (new_client->pwr_state == VGA_SWITCHEROO_OFF) ++ if (vga_switcheroo_pwr_state(new_client) == VGA_SWITCHEROO_OFF) + vga_switchon(new_client); + + vga_set_default_device(new_client->pdev); +@@ -695,7 +706,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) + if (new_client->ops->reprobe) + new_client->ops->reprobe(new_client->pdev); + +- if (active->pwr_state == VGA_SWITCHEROO_ON) ++ if (vga_switcheroo_pwr_state(active) == VGA_SWITCHEROO_ON) + vga_switchoff(active); + + set_audio_state(new_client->id, VGA_SWITCHEROO_ON); +@@ -939,11 +950,6 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + * Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel + * command line disables it. + * +- * When the driver decides to power up or down, it notifies vga_switcheroo +- * thereof so that it can (a) power the audio device on the GPU up or down, +- * and (b) update its internal power state representation for the device. +- * This is achieved by vga_switcheroo_set_dynamic_switch(). +- * + * After the GPU has been suspended, the handler needs to be called to cut + * power to the GPU. Likewise it needs to reinstate power before the GPU + * can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(), +@@ -951,8 +957,9 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + * calls to the handler. + * + * When the audio device resumes, the GPU needs to be woken. 
This is achieved +- * by vga_switcheroo_init_domain_pm_optimus_hdmi_audio(), which augments the +- * audio device's resume function. ++ * by a PCI quirk which calls device_link_add() to declare a dependency on the ++ * GPU. That way, the GPU is kept awake whenever and as long as the audio ++ * device is in use. + * + * On muxed machines, if the mux is initially switched to the discrete GPU, + * the user ends up with a black screen when the GPU powers down after boot. +@@ -978,35 +985,6 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, + vgasr_priv.handler->power_state(client->id, state); + } + +-/** +- * vga_switcheroo_set_dynamic_switch() - helper for driver power control +- * @pdev: client pci device +- * @dynamic: new power state +- * +- * Helper for GPUs whose power state is controlled by the driver's runtime pm. +- * When the driver decides to power up or down, it notifies vga_switcheroo +- * thereof using this helper so that it can (a) power the audio device on +- * the GPU up or down, and (b) update its internal power state representation +- * for the device. +- */ +-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, +- enum vga_switcheroo_state dynamic) +-{ +- struct vga_switcheroo_client *client; +- +- mutex_lock(&vgasr_mutex); +- client = find_client_from_pci(&vgasr_priv.clients, pdev); +- if (!client || !client->driver_power_control) { +- mutex_unlock(&vgasr_mutex); +- return; +- } +- +- client->pwr_state = dynamic; +- set_audio_state(client->id, dynamic); +- mutex_unlock(&vgasr_mutex); +-} +-EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch); +- + /* switcheroo power domain */ + static int vga_switcheroo_runtime_suspend(struct device *dev) + { +@@ -1076,69 +1054,3 @@ void vga_switcheroo_fini_domain_pm_ops(struct device *dev) + dev_pm_domain_set(dev, NULL); + } + EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops); +- +-static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) +-{ +- struct pci_dev *pdev = to_pci_dev(dev); +- struct vga_switcheroo_client *client; +- struct device *video_dev = NULL; +- int ret; +- +- /* we need to check if we have to switch back on the video +- * device so the audio device can come back +- */ +- mutex_lock(&vgasr_mutex); +- list_for_each_entry(client, &vgasr_priv.clients, list) { +- if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && +- client_is_vga(client)) { +- video_dev = &client->pdev->dev; +- break; +- } +- } +- mutex_unlock(&vgasr_mutex); +- +- if (video_dev) { +- ret = pm_runtime_get_sync(video_dev); +- if (ret && ret != 1) +- return ret; +- } +- ret = dev->bus->pm->runtime_resume(dev); +- +- /* put the reference for the gpu */ +- if (video_dev) { +- pm_runtime_mark_last_busy(video_dev); +- pm_runtime_put_autosuspend(video_dev); +- } +- return ret; +-} +- +-/** +- * vga_switcheroo_init_domain_pm_optimus_hdmi_audio() - helper for driver +- * power control +- * @dev: audio client device +- * @domain: power domain +- * +- * Helper for GPUs whose power state is controlled by the driver's runtime pm. +- * When the audio device resumes, the GPU needs to be woken. This helper +- * augments the audio device's resume function to do that. +- * +- * Return: 0 on success, -EINVAL if no power management operations are +- * defined for this device. 
+- */ +-int +-vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, +- struct dev_pm_domain *domain) +-{ +- /* copy over all the bus versions */ +- if (dev->bus && dev->bus->pm) { +- domain->ops = *dev->bus->pm; +- domain->ops.runtime_resume = +- vga_switcheroo_runtime_resume_hdmi_audio; +- +- dev_pm_domain_set(dev, domain); +- return 0; +- } +- dev_pm_domain_set(dev, NULL); +- return -EINVAL; +-} +-EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio); +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 2bca84f4c2b2..85db184321f7 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = { + "LEN005b", /* P50 */ + "LEN005e", /* T560 */ + "LEN006c", /* T470s */ ++ "LEN007a", /* T470s */ + "LEN0071", /* T480 */ + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ + "LEN0073", /* X1 Carbon G5 (Elantech) */ +diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c +index 96d16fca68b2..088ca17a843d 100644 +--- a/drivers/macintosh/windfarm_pm112.c ++++ b/drivers/macintosh/windfarm_pm112.c +@@ -133,14 +133,6 @@ static int create_cpu_loop(int cpu) + s32 tmax; + int fmin; + +- /* Get PID params from the appropriate SAT */ +- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); +- if (hdr == NULL) { +- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); +- return -EINVAL; +- } +- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; +- + /* Get FVT params to get Tmax; if not found, assume default */ + hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); + if (hdr) { +@@ -153,6 +145,16 @@ static int create_cpu_loop(int cpu) + if (tmax < cpu_all_tmax) + cpu_all_tmax = tmax; + ++ kfree(hdr); ++ ++ /* Get PID params from the appropriate SAT */ ++ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); ++ if (hdr == NULL) { ++ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); ++ return -EINVAL; ++ } ++ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; ++ + /* + * Darwin has a minimum fan speed of 1000 rpm for the 4-way and + * 515 for the 2-way. 
That appears to be overkill, so for now, +@@ -175,6 +177,9 @@ static int create_cpu_loop(int cpu) + pid.min = fmin; + + wf_cpu_pid_init(&cpu_pid[cpu], &pid); ++ ++ kfree(hdr); ++ + return 0; + } + +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index d9a67759fdb5..5e38ceb36000 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -3088,7 +3088,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) + limits->max_segment_size = PAGE_SIZE; + + limits->logical_block_size = +- max_t(unsigned short, limits->logical_block_size, cc->sector_size); ++ max_t(unsigned, limits->logical_block_size, cc->sector_size); + limits->physical_block_size = + max_t(unsigned, limits->physical_block_size, cc->sector_size); + limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index b942c74f1ce8..948344531baf 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -7411,7 +7411,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) + */ + mddev_put(mddev); + /* Wait until bdev->bd_disk is definitely gone */ +- flush_workqueue(md_misc_wq); ++ if (work_pending(&mddev->del_work)) ++ flush_workqueue(md_misc_wq); + /* Then retry the open from the top */ + return -ERESTARTSYS; + } +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c +index 0d7d687aeea0..061b7824f698 100644 +--- a/drivers/media/cec/cec-adap.c ++++ b/drivers/media/cec/cec-adap.c +@@ -1624,6 +1624,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap, + unsigned j; + + log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; ++ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { ++ dprintk(1, "unknown logical address type\n"); ++ return -EINVAL; ++ } + if (type_mask & (1 << log_addrs->log_addr_type[i])) { + dprintk(1, "duplicate logical address type\n"); + return -EINVAL; +@@ -1644,10 +1648,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap, + dprintk(1, "invalid primary device type\n"); + return -EINVAL; + } +- if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { +- dprintk(1, "unknown logical address type\n"); +- return -EINVAL; +- } + for (j = 0; j < feature_sz; j++) { + if ((features[j] & 0x80) == 0) { + if (op_is_dev_features) +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c +index 69f564b0837a..eb0331b8a583 100644 +--- a/drivers/media/i2c/ov5640.c ++++ b/drivers/media/i2c/ov5640.c +@@ -2298,8 +2298,8 @@ static int ov5640_probe(struct i2c_client *client, + free_ctrls: + v4l2_ctrl_handler_free(&sensor->ctrls.handler); + entity_cleanup: +- mutex_destroy(&sensor->lock); + media_entity_cleanup(&sensor->sd.entity); ++ mutex_destroy(&sensor->lock); + return ret; + } + +@@ -2309,9 +2309,9 @@ static int ov5640_remove(struct i2c_client *client) + struct ov5640_dev *sensor = to_ov5640_dev(sd); + + v4l2_async_unregister_subdev(&sensor->sd); +- mutex_destroy(&sensor->lock); + media_entity_cleanup(&sensor->sd.entity); + v4l2_ctrl_handler_free(&sensor->ctrls.handler); ++ mutex_destroy(&sensor->lock); + + return 0; + } +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index 2988031d285d..0047d144c932 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -12,6 +12,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -24,6 +25,7 @@ + struct rcar_fcp_device { + struct list_head list; + struct device *dev; ++ struct device_dma_parameters dma_parms; + }; + + static LIST_HEAD(fcp_devices); +@@ 
-139,6 +141,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) + + fcp->dev = &pdev->dev; + ++ fcp->dev->dma_parms = &fcp->dma_parms; ++ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); ++ + pm_runtime_enable(&pdev->dev); + + mutex_lock(&fcp_lock); +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c +index e35b1faf0ddc..c826997f5433 100644 +--- a/drivers/media/tuners/si2157.c ++++ b/drivers/media/tuners/si2157.c +@@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe) + struct si2157_cmd cmd; + const struct firmware *fw; + const char *fw_name; +- unsigned int uitmp, chip_id; ++ unsigned int chip_id, xtal_trim; + + dev_dbg(&client->dev, "\n"); + +- /* Returned IF frequency is garbage when firmware is not running */ +- memcpy(cmd.args, "\x15\x00\x06\x07", 4); ++ /* Try to get Xtal trim property, to verify tuner still running */ ++ memcpy(cmd.args, "\x15\x00\x04\x02", 4); + cmd.wlen = 4; + cmd.rlen = 4; + ret = si2157_cmd_execute(client, &cmd); +- if (ret) +- goto err; + +- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; +- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); ++ xtal_trim = cmd.args[2] | (cmd.args[3] << 8); + +- if (uitmp == dev->if_frequency / 1000) ++ if (ret == 0 && xtal_trim < 16) + goto warm; + ++ dev->if_frequency = 0; /* we no longer know current tuner state */ ++ + /* power up */ + if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { + memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); +diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c +index a0057641cc86..c55180912c3a 100644 +--- a/drivers/media/usb/dvb-usb/dibusb-mb.c ++++ b/drivers/media/usb/dvb-usb/dibusb-mb.c +@@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) + + if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { + err("tuner i2c write failed."); +- ret = -EREMOTEIO; ++ return -EREMOTEIO; + } + + if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) +diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c +index c618764480c6..a19c01083124 100644 +--- a/drivers/media/usb/go7007/snd-go7007.c ++++ b/drivers/media/usb/go7007/snd-go7007.c +@@ -243,22 +243,18 @@ int go7007_snd_init(struct go7007 *go) + gosnd->capturing = 0; + ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, + &gosnd->card); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_snd; ++ + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, + &go7007_snd_device_ops); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->longname, gosnd->card->shortname, +@@ -269,11 +265,8 @@ int go7007_snd_init(struct go7007 *go) + &go7007_snd_capture_ops); + + ret = snd_card_register(gosnd->card); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; + + gosnd->substream = NULL; + go->snd_context = gosnd; +@@ -281,6 +274,12 @@ int go7007_snd_init(struct go7007 *go) + ++dev; + + return 0; ++ ++free_card: ++ snd_card_free(gosnd->card); ++free_snd: ++ kfree(gosnd); ++ return ret; + } + 
EXPORT_SYMBOL(go7007_snd_init); + +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c +index 7ee8c9082021..7568cea55922 100644 +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -717,9 +717,8 @@ try_again: + /* Retry init sequence, but without R4_18V_PRESENT. */ + retries = 0; + goto try_again; +- } else { +- goto remove; + } ++ return err; + } + + /* +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index 8c0b80a54e4d..6d1ac9443eb2 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -79,7 +79,7 @@ + #define ESDHC_STD_TUNING_EN (1 << 24) + /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ + #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +-#define ESDHC_TUNING_START_TAP_MASK 0xff ++#define ESDHC_TUNING_START_TAP_MASK 0x7f + #define ESDHC_TUNING_STEP_MASK 0x00070000 + #define ESDHC_TUNING_STEP_SHIFT 16 + +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 192844b50c69..75cf66ffc705 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -860,6 +860,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ + msm_host->use_cdr = true; + ++ /* ++ * Clear tuning_done flag before tuning to ensure proper ++ * HS400 settings. ++ */ ++ msm_host->tuning_done = 0; ++ + /* + * For HS400 tuning in HS200 timing requires: + * - select MCLK/2 in VENDOR_SPEC +@@ -1162,7 +1168,9 @@ static const struct sdhci_pltfm_data sdhci_msm_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_NO_CARD_NO_RESET | + SDHCI_QUIRK_SINGLE_POWER_WRITE | +- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ++ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, ++ + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_msm_ops, + }; +diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c +index a838bf5480d8..a863a345fc59 100644 +--- a/drivers/mmc/host/via-sdmmc.c ++++ b/drivers/mmc/host/via-sdmmc.c +@@ -323,6 +323,8 @@ struct via_crdr_mmc_host { + /* some devices need a very long delay for power to stabilize */ + #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 + ++#define VIA_CMD_TIMEOUT_MS 1000 ++ + static const struct pci_device_id via_ids[] = { + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, +@@ -555,14 +557,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host, + { + void __iomem *addrbase; + struct mmc_data *data; ++ unsigned int timeout_ms; + u32 cmdctrl = 0; + + WARN_ON(host->cmd); + + data = cmd->data; +- mod_timer(&host->timer, jiffies + HZ); + host->cmd = cmd; + ++ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; ++ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); ++ + /*Command index*/ + cmdctrl = cmd->opcode << 8; + +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c +index 2a978d9832a7..f8d793b15a7a 100644 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c +@@ -911,11 +911,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, + if (!section) { + /* + * Small-page NAND use byte 6 for BBI while large-page +- * NAND use byte 0. ++ * NAND use bytes 0 and 1. 
+ */ +- if (cfg->page_size > 512) +- oobregion->offset++; +- oobregion->length--; ++ if (cfg->page_size > 512) { ++ oobregion->offset += 2; ++ oobregion->length -= 2; ++ } else { ++ oobregion->length--; ++ } + } + } + +diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c +index a47a7e4bd25a..d69e5bae541e 100644 +--- a/drivers/mtd/nand/pasemi_nand.c ++++ b/drivers/mtd/nand/pasemi_nand.c +@@ -163,7 +163,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { + dev_err(dev, "Unable to register MTD device\n"); + err = -ENODEV; +- goto out_lpc; ++ goto out_cleanup_nand; + } + + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, +@@ -171,6 +171,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + + return 0; + ++ out_cleanup_nand: ++ nand_cleanup(chip); + out_lpc: + release_region(lpcctl, 4); + out_ior: +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c +index daed57d3d209..2b994bbf85ca 100644 +--- a/drivers/net/can/usb/kvaser_usb.c ++++ b/drivers/net/can/usb/kvaser_usb.c +@@ -791,7 +791,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, + if (!urb) + return -ENOMEM; + +- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); ++ buf = kzalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; +@@ -1459,7 +1459,7 @@ static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +@@ -1592,7 +1592,7 @@ static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c +index 3143de45baaa..c458b81ba63a 100644 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -433,7 +433,7 @@ static void emac_timeout(struct net_device *dev) + /* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; +@@ -441,7 +441,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + + channel = db->tx_fifo_stat & 3; + if (channel == 3) +- return 1; ++ return NETDEV_TX_BUSY; + + channel = (channel == 1 ? 
1 : 0); + +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index dc9149a32f41..bb1710ff910a 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -2131,6 +2131,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + rss->hash_key; + int rc; + ++ if (unlikely(!func)) ++ return -EINVAL; ++ + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, +@@ -2143,8 +2146,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + if (rss->hash_func) + rss->hash_func--; + +- if (func) +- *func = rss->hash_func; ++ *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 38391230ca86..7d3cbbd88a00 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -72,6 +72,9 @@ + #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + ++/* Forward declarations */ ++static void bcmgenet_set_rx_mode(struct net_device *dev); ++ + static inline void bcmgenet_writel(u32 value, void __iomem *offset) + { + /* MIPS chips strapped for BE will automagically configure the +@@ -2858,6 +2861,7 @@ static void bcmgenet_netif_start(struct net_device *dev) + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ ++ bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + bcmgenet_enable_tx_napi(priv); + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 956fbb164e6f..85c11dafb4cd 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -3560,12 +3560,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } +- dev_info(dev, "Partner protocol version is %d\n", +- crq->version_exchange_rsp.version); +- if (be16_to_cpu(crq->version_exchange_rsp.version) < +- ibmvnic_version) +- ibmvnic_version = ++ ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); ++ dev_info(dev, "Partner protocol version is %d\n", ++ ibmvnic_version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c +index 3dd4aeb2706d..175681aa5260 100644 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c +@@ -3169,8 +3169,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { ++ case e1000_82544: { + unsigned int pull_size; +- case e1000_82544: ++ + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements +@@ -3191,6 +3192,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + } + len = skb_headlen(skb); + break; ++ } + default: + /* do nothing */ + break; +diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h +index 98e68888abb1..e0ef6007c275 100644 +--- a/drivers/net/ethernet/intel/e1000e/e1000.h ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h +@@ -596,7 +596,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned 
long reg) + + #define er32(reg) __er32(hw, E1000_##reg) + +-s32 __ew32_prepare(struct e1000_hw *hw); + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + + #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index 1ad345796e80..5269af303f55 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -137,14 +137,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. + **/ +-s32 __ew32_prepare(struct e1000_hw *hw) ++static void __ew32_prepare(struct e1000_hw *hw) + { + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); +- +- return i; + } + + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +@@ -625,11 +623,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) + { + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, rx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { ++ if (unlikely(i != readl(rx_ring->tail))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); +@@ -642,11 +640,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) + { + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, tx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { ++ if (unlikely(i != readl(tx_ring->tail))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); +@@ -5271,6 +5269,10 @@ static void e1000_watchdog_task(struct work_struct *work) + /* oops */ + break; + } ++ if (hw->mac.type == e1000_pch_spt) { ++ netdev->features &= ~NETIF_F_TSO; ++ netdev->features &= ~NETIF_F_TSO6; ++ } + } + + /* enable transmits in the hardware, need to do this +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index 82028ce355fb..ff2be34bff39 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -163,7 +163,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, + u32 speed; + u32 supported, advertising; + +- status = rd32(E1000_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? 
++ 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 815284fe9324..6b5662674c75 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -2267,7 +2267,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Configure pause time (2 TCs per register) */ +- reg = hw->fc.pause_time * 0x00010001; ++ reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index ba184287e11f..64ee45b6680a 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -2274,7 +2274,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, + rx_buffer->page_offset ^= truesize; + #else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? +- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : ++ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c +index 41d30f55c946..6bd6c261f2ba 100644 +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -845,7 +845,8 @@ static int lpc_mii_init(struct netdata_local *pldat) + if (mdiobus_register(pldat->mii_bus)) + goto err_out_unregister_bus; + +- if (lpc_mii_probe(pldat->ndev) != 0) ++ err = lpc_mii_probe(pldat->ndev); ++ if (err) + goto err_out_unregister_bus; + + return 0; +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h +index a80531b5aecc..c132b08cefde 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede.h ++++ b/drivers/net/ethernet/qlogic/qede/qede.h +@@ -528,12 +528,14 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); + #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) + #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) + #define NUM_RX_BDS_MIN 128 ++#define NUM_RX_BDS_KDUMP_MIN 63 + #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) + + #define TX_RING_SIZE_POW 13 + #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) + #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) + #define NUM_TX_BDS_MIN 128 ++#define NUM_TX_BDS_KDUMP_MIN 63 + #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX + + #define QEDE_MIN_PKT_LEN 64 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c +index dab202f343c6..8bb734486bf3 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c +@@ -29,6 +29,7 @@ + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ ++#include + #include + #include + #include +@@ -624,8 +625,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, + edev->dp_module = dp_module; + edev->dp_level = dp_level; + edev->ops = qed_ops; +- edev->q_num_rx_buffers = NUM_RX_BDS_DEF; +- edev->q_num_tx_buffers = NUM_TX_BDS_DEF; ++ ++ if (is_kdump_kernel()) { ++ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; ++ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; ++ } else { ++ edev->q_num_rx_buffers = NUM_RX_BDS_DEF; ++ edev->q_num_tx_buffers = NUM_TX_BDS_DEF; ++ } + + DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", + info->num_queues, info->num_queues); +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 3072fc902eca..b7f41c52766f 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -449,6 +449,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + int ret; + rx_handler_result_t handle_res; + ++ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ ++ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) ++ return RX_HANDLER_PASS; ++ + port = macvlan_port_get_rcu(skb->dev); + if (is_multicast_ether_addr(eth->h_dest)) { + unsigned int hash; +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index 2ff27314e047..66c6c07c7a16 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -692,6 +692,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; ++ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) ++ return 0; + while (n--) + p[n] = rssConf->indTable[n]; + return 0; +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 9bca97d5f063..afdc2c290fd0 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1610,6 +1610,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, + ns_olen = request->len - skb_network_offset(request) - + sizeof(struct ipv6hdr) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { ++ if (!ns->opt[i + 1]) { ++ kfree_skb(reply); ++ return NULL; ++ } + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c +index 6f669166c263..4e769cf07f59 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c +@@ -610,6 +610,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, + hif_dev->remain_skb = nskb; + spin_unlock(&hif_dev->rx_lock); + } else { ++ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { ++ dev_err(&hif_dev->udev->dev, ++ "ath9k_htc: over RX MAX_PKT_NUM\n"); ++ goto err; ++ } + nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); + if (!nskb) { + dev_err(&hif_dev->udev->dev, +@@ -636,9 +641,9 @@ err: + + static void ath9k_hif_usb_rx_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + int ret; + + if (!skb) +@@ -678,14 +683,15 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + } + + static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; ++ struct rx_buf *rx_buf = (struct rx_buf 
*)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + struct sk_buff *nskb; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int ret; + + if (!skb) +@@ -743,6 +749,7 @@ resubmit: + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + urb->context = NULL; + } + +@@ -788,7 +795,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) + init_usb_anchor(&hif_dev->mgmt_submitted); + + for (i = 0; i < MAX_TX_URB_NUM; i++) { +- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); ++ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); + if (!tx_buf) + goto err; + +@@ -825,8 +832,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->rx_submitted); +@@ -834,6 +842,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + + for (i = 0; i < MAX_RX_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -848,11 +862,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_bulk_urb(urb, hif_dev->udev, + usb_rcvbulkpipe(hif_dev->udev, + USB_WLAN_RX_PIPE), + skb->data, MAX_RX_BUF_SIZE, +- ath9k_hif_usb_rx_cb, skb); ++ ath9k_hif_usb_rx_cb, rx_buf); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->rx_submitted); +@@ -878,6 +895,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_rx_urbs(hif_dev); + return ret; + } +@@ -889,14 +908,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->reg_in_submitted); + + for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -911,11 +937,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, + USB_REG_IN_PIPE), + skb->data, MAX_REG_IN_BUF_SIZE, +- ath9k_hif_usb_reg_in_cb, skb, 1); ++ ath9k_hif_usb_reg_in_cb, rx_buf, 1); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->reg_in_submitted); +@@ -941,6 +970,8 @@ err_submit: + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); + return ret; + } +@@ -971,7 +1002,7 @@ err: + return -ENOMEM; + } + +-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) + { + usb_kill_anchored_urbs(&hif_dev->regout_submitted); + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); +@@ -1339,8 +1370,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) + + 
if (hif_dev->flags & HIF_USB_READY) { + ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); +- ath9k_htc_hw_free(hif_dev->htc_handle); + ath9k_hif_usb_dev_deinit(hif_dev); ++ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); ++ ath9k_htc_hw_free(hif_dev->htc_handle); + } + + usb_set_intfdata(interface, NULL); +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h +index 7846916aa01d..5985aa15ca93 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.h ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h +@@ -86,6 +86,11 @@ struct tx_buf { + struct list_head list; + }; + ++struct rx_buf { ++ struct sk_buff *skb; ++ struct hif_device_usb *hif_dev; ++}; ++ + #define HIF_USB_TX_STOP BIT(0) + #define HIF_USB_TX_FLUSH BIT(1) + +@@ -133,5 +138,6 @@ struct hif_device_usb { + + int ath9k_hif_usb_init(void); + void ath9k_hif_usb_exit(void); ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); + + #endif /* HTC_USB_H */ +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +index da2164b0cccc..66ef5cf16450 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +@@ -933,8 +933,9 @@ err_init: + int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + u16 devid, char *product, u32 drv_info) + { +- struct ieee80211_hw *hw; ++ struct hif_device_usb *hif_dev; + struct ath9k_htc_priv *priv; ++ struct ieee80211_hw *hw; + int ret; + + hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); +@@ -969,7 +970,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + return 0; + + err_init: +- ath9k_deinit_wmi(priv); ++ ath9k_stop_wmi(priv); ++ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; ++ ath9k_hif_usb_dealloc_urbs(hif_dev); ++ ath9k_destoy_wmi(priv); + err_free: + ieee80211_free_hw(hw); + return ret; +@@ -984,7 +988,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) + htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; + + ath9k_deinit_device(htc_handle->drv_priv); +- ath9k_deinit_wmi(htc_handle->drv_priv); ++ ath9k_stop_wmi(htc_handle->drv_priv); + ieee80211_free_hw(htc_handle->drv_priv->hw); + } + } +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +index 4748f557c753..11d06021b5e4 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { +- ath_warn(common, +- "Short RX data len, dropping (dlen: %d)\n", +- rs_datalen); ++ ath_dbg(common, ANY, ++ "Short RX data len, dropping (dlen: %d)\n", ++ rs_datalen); + goto rx_next; + } + +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c +index 1bf63a4efb4c..d2e062eaf561 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c +@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target, + + if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { + epid = svc_rspmsg->endpoint_id; ++ if (epid < 0 || epid >= ENDPOINT_MAX) ++ return; ++ + service_id = be16_to_cpu(svc_rspmsg->service_id); + max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); + endpoint = 
&target->endpoint[epid]; +diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c +index 64a354fa78ab..f57f48e4d7a0 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.c ++++ b/drivers/net/wireless/ath/ath9k/wmi.c +@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) + return wmi; + } + +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv) + { + struct wmi *wmi = priv->wmi; + + mutex_lock(&wmi->op_mutex); + wmi->stopped = true; + mutex_unlock(&wmi->op_mutex); ++} + ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) ++{ + kfree(priv->wmi); + } + +diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h +index 380175d5ecd7..d8b912206232 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.h ++++ b/drivers/net/wireless/ath/ath9k/wmi.h +@@ -179,7 +179,6 @@ struct wmi { + }; + + struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); + int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, + enum htc_endpoint_id *wmi_ctrl_epid); + int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, +@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + void ath9k_wmi_event_tasklet(unsigned long data); + void ath9k_fatal_work(struct work_struct *work); + void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv); ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); + + #define WMI_CMD(_wmi_cmd) \ + do { \ +diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c +index 88045f93a76c..62ed0977f32c 100644 +--- a/drivers/net/wireless/ath/carl9170/fw.c ++++ b/drivers/net/wireless/ath/carl9170/fw.c +@@ -351,9 +351,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + + if (SUPP(CARL9170FW_WLANTX_CAB)) { +- if_comb_types |= +- BIT(NL80211_IFTYPE_AP) | +- BIT(NL80211_IFTYPE_P2P_GO); ++ if_comb_types |= BIT(NL80211_IFTYPE_AP); + + #ifdef CONFIG_MAC80211_MESH + if_comb_types |= +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c +index 988c8857d78c..80312b2fddb1 100644 +--- a/drivers/net/wireless/ath/carl9170/main.c ++++ b/drivers/net/wireless/ath/carl9170/main.c +@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, + ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && + (vif->type != NL80211_IFTYPE_AP)); + +- /* While the driver supports HW offload in a single +- * P2P client configuration, it doesn't support HW +- * offload in the favourit, concurrent P2P GO+CLIENT +- * configuration. Hence, HW offload will always be +- * disabled for P2P. ++ /* The driver used to have P2P GO+CLIENT support, ++ * but since this was dropped and we don't know if ++ * there are any gremlins lurking in the shadows, ++ * so best we keep HW offload disabled for P2P. + */ + ar->disable_offload |= vif->p2p; + +@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, + if (vif->type == NL80211_IFTYPE_STATION) + break; + +- /* P2P GO [master] use-case +- * Because the P2P GO station is selected dynamically +- * by all participating peers of a WIFI Direct network, +- * the driver has be able to change the main interface +- * operating mode on the fly. 
+- */ +- if (main_vif->p2p && vif->p2p && +- vif->type == NL80211_IFTYPE_AP) { +- old_main = main_vif; +- break; +- } +- + err = -EBUSY; + rcu_read_unlock(); + +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c +index af37c19dbfd7..688152bcfc15 100644 +--- a/drivers/net/wireless/ath/wcn36xx/main.c ++++ b/drivers/net/wireless/ath/wcn36xx/main.c +@@ -1280,7 +1280,7 @@ static int wcn36xx_probe(struct platform_device *pdev) + if (addr && ret != ETH_ALEN) { + wcn36xx_err("invalid local-mac-address\n"); + ret = -EINVAL; +- goto out_wq; ++ goto out_destroy_ept; + } else if (addr) { + wcn36xx_info("mac address: %pM\n", addr); + SET_IEEE80211_PERM_ADDR(wcn->hw, addr); +@@ -1288,7 +1288,7 @@ static int wcn36xx_probe(struct platform_device *pdev) + + ret = wcn36xx_platform_get_resources(wcn, pdev); + if (ret) +- goto out_wq; ++ goto out_destroy_ept; + + wcn36xx_init_ieee80211(wcn); + ret = ieee80211_register_hw(wcn->hw); +@@ -1300,6 +1300,8 @@ static int wcn36xx_probe(struct platform_device *pdev) + out_unmap: + iounmap(wcn->ccu_base); + iounmap(wcn->dxe_base); ++out_destroy_ept: ++ rpmsg_destroy_ept(wcn->smd_channel); + out_wq: + ieee80211_free_hw(hw); + out_err: +diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c +index b37e7391f55d..8a226a9d755e 100644 +--- a/drivers/net/wireless/broadcom/b43/main.c ++++ b/drivers/net/wireless/broadcom/b43/main.c +@@ -5596,7 +5596,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); +- ++ ieee80211_hw_set(hw, MFP_CAPABLE); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c +index f435bd0f8b5b..6f123a52ae2d 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/main.c ++++ b/drivers/net/wireless/broadcom/b43legacy/main.c +@@ -3835,6 +3835,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); ++ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c +index 35ccf400b02c..87045e30e585 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c ++++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c +@@ -571,6 +571,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); ++ goto drop; + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +index 53ae30259989..473b2b3cb6f5 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +@@ -192,13 +192,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) + if (!err) + ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + ++ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); ++ + if (drvr->settings->feature_disable) { + brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", + ifp->drvr->feat_flags, + drvr->settings->feature_disable); + ifp->drvr->feat_flags &= 
~drvr->settings->feature_disable; + } +- brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + + /* set chip related quirks */ + switch (drvr->bus_if->chip) { +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 5e8e34a08b2d..79c50aebffc4 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1451,7 +1451,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) + { + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); +- static struct mwifiex_sta_node *node; ++ struct mwifiex_sta_node *node; ++ int i; + + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && + priv->media_connected && idx == 0) { +@@ -1461,13 +1462,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, + HostCmd_ACT_GEN_GET, 0, NULL, true); + +- if (node && (&node->list == &priv->sta_list)) { +- node = NULL; +- return -ENOENT; +- } +- +- node = list_prepare_entry(node, &priv->sta_list, list); +- list_for_each_entry_continue(node, &priv->sta_list, list) { ++ i = 0; ++ list_for_each_entry(node, &priv->sta_list, list) { ++ if (i++ != idx) ++ continue; + ether_addr_copy(mac, node->mac_addr); + return mwifiex_dump_station_info(priv, node, sinfo); + } +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c +index 93eda23f0123..7a050a75bdcb 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.c ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c +@@ -910,10 +910,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, + + WARN_ON(NULL == skb); + _urb = usb_alloc_urb(0, GFP_ATOMIC); +- if (!_urb) { +- kfree_skb(skb); ++ if (!_urb) + return NULL; +- } + _rtl_install_trx_info(rtlusb, skb, ep_num); + usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, + ep_num), skb->data, skb->len, _rtl_tx_complete, skb); +@@ -927,7 +925,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + u32 ep_num; + struct urb *_urb = NULL; +- struct sk_buff *_skb = NULL; + + WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); + if (unlikely(IS_USB_STOP(rtlusb))) { +@@ -936,8 +933,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + return; + } + ep_num = rtlusb->ep_map.ep_mapping[qnum]; +- _skb = skb; +- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); ++ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); + if (unlikely(!_urb)) { + pr_err("Can't allocate urb. Drop skb!\n"); + kfree_skb(skb); +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index a760c449f4a9..2d95755092e3 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -758,6 +758,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) + } + EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); + ++/* ++ * In NVMe 1.0 the CNS field was just a binary controller or namespace ++ * flag, thus sending any new CNS opcodes has a big chance of not working. ++ * Qemu unfortunately had that bug after reporting a 1.1 version compliance ++ * (but not for any later version). 
++ */ ++static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) ++{ ++ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) ++ return ctrl->vs < NVME_VS(1, 2, 0); ++ return ctrl->vs < NVME_VS(1, 1, 0); ++} ++ + static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) + { + struct nvme_command c = { }; +@@ -2538,8 +2551,7 @@ static void nvme_scan_work(struct work_struct *work) + return; + + nn = le32_to_cpu(id->nn); +- if (ctrl->vs >= NVME_VS(1, 1, 0) && +- !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { ++ if (!nvme_ctrl_limited_cns(ctrl)) { + if (!nvme_scan_ns_list(ctrl, nn)) + goto done; + } +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index e23bfd9845b1..31cc04aeaaaa 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1446,7 +1446,7 @@ int pci_setup_device(struct pci_dev *dev) + /* device class may be changed after fixup */ + class = dev->class >> 8; + +- if (dev->non_compliant_bars) { ++ if (dev->non_compliant_bars && !dev->mmio_always_on) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { + dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); +@@ -1557,13 +1557,33 @@ static void pci_configure_mps(struct pci_dev *dev) + struct pci_dev *bridge = pci_upstream_bridge(dev); + int mps, p_mps, rc; + +- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) ++ if (!pci_is_pcie(dev)) + return; + + /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ + if (dev->is_virtfn) + return; + ++ /* ++ * For Root Complex Integrated Endpoints, program the maximum ++ * supported value unless limited by the PCIE_BUS_PEER2PEER case. ++ */ ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { ++ if (pcie_bus_config == PCIE_BUS_PEER2PEER) ++ mps = 128; ++ else ++ mps = 128 << dev->pcie_mpss; ++ rc = pcie_set_mps(dev, mps); ++ if (rc) { ++ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", ++ mps); ++ } ++ return; ++ } ++ ++ if (!bridge || !pci_is_pcie(bridge)) ++ return; ++ + mps = pcie_get_mps(dev); + p_mps = pcie_get_mps(bridge); + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index e7ed051ec125..5f26c170315c 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include /* isa_dma_bridge_buggy */ + #include "pci.h" + +@@ -4235,6 +4236,24 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, + quirk_chelsio_T5_disable_root_port_attributes); + ++/* ++ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided ++ * by a device ++ * @acs_ctrl_req: Bitmask of desired ACS controls ++ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by ++ * the hardware design ++ * ++ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included ++ * in @acs_ctrl_ena, i.e., the device provides all the access controls the ++ * caller desires. Return 0 otherwise. 
++ */ ++static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) ++{ ++ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req) ++ return 1; ++ return 0; ++} ++ + /* + * AMD has indicated that the devices below do not support peer-to-peer + * in any system where they are found in the southbridge with an AMD +@@ -4278,7 +4297,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) + /* Filter out flags not applicable to multifunction */ + acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); + +- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1; ++ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR); + #else + return -ENODEV; + #endif +@@ -4305,20 +4324,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) + + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) + { ++ if (!pci_quirk_cavium_acs_match(dev)) ++ return -ENOTTY; ++ + /* +- * Cavium root ports don't advertise an ACS capability. However, ++ * Cavium Root Ports don't advertise an ACS capability. However, + * the RTL internally implements similar protection as if ACS had +- * Request Redirection, Completion Redirection, Source Validation, ++ * Source Validation, Request Redirection, Completion Redirection, + * and Upstream Forwarding features enabled. Assert that the + * hardware implements and enables equivalent ACS functionality for + * these flags. + */ +- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); +- +- if (!pci_quirk_cavium_acs_match(dev)) +- return -ENOTTY; +- +- return acs_flags ? 0 : 1; ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) +@@ -4328,13 +4346,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) + * transactions with others, allowing masking out these bits as if they + * were unimplemented in the ACS capability. + */ +- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); +- +- return acs_flags ? 0 : 1; ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + /* +- * Many Intel PCH root ports do provide ACS-like features to disable peer ++ * Many Intel PCH Root Ports do provide ACS-like features to disable peer + * transactions and validate bus numbers in requests, but do not provide an + * actual PCIe ACS capability. This is the list of device IDs known to fall + * into that category as provided by Intel in Red Hat bugzilla 1037684. +@@ -4382,37 +4399,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) + return false; + } + +-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV) +- + static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) + { +- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ? +- INTEL_PCH_ACS_FLAGS : 0; +- + if (!pci_quirk_intel_pch_acs_match(dev)) + return -ENOTTY; + +- return acs_flags & ~flags ? 0 : 1; ++ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK) ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); ++ ++ return pci_acs_ctrl_enabled(acs_flags, 0); + } + + /* +- * These QCOM root ports do provide ACS-like features to disable peer ++ * These QCOM Root Ports do provide ACS-like features to disable peer + * transactions and validate bus numbers in requests, but do not provide an + * actual PCIe ACS capability. 
Hardware supports source validation but it + * will report the issue as Completer Abort instead of ACS Violation. +- * Hardware doesn't support peer-to-peer and each root port is a root +- * complex with unique segment numbers. It is not possible for one root +- * port to pass traffic to another root port. All PCIe transactions are +- * terminated inside the root port. ++ * Hardware doesn't support peer-to-peer and each Root Port is a Root ++ * Complex with unique segment numbers. It is not possible for one Root ++ * Port to pass traffic to another Root Port. All PCIe transactions are ++ * terminated inside the Root Port. + */ + static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) + { +- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); +- int ret = acs_flags & ~flags ? 0 : 1; +- +- dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret); +- +- return ret; ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + /* +@@ -4495,7 +4507,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) + + pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); + +- return acs_flags & ~ctrl ? 0 : 1; ++ return pci_acs_ctrl_enabled(acs_flags, ctrl); + } + + static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) +@@ -4509,10 +4521,35 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) + * perform peer-to-peer with other functions, allowing us to mask out + * these bits as if they were unimplemented in the ACS capability. + */ +- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | +- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | ++ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); ++} + +- return acs_flags ? 0 : 1; ++static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) ++{ ++ /* ++ * Intel RCiEP's are required to allow p2p only on translated ++ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, ++ * "Root-Complex Peer to Peer Considerations". ++ */ ++ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) ++ return -ENOTTY; ++ ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); ++} ++ ++static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) ++{ ++ /* ++ * iProc PAXB Root Ports don't advertise an ACS capability, but ++ * they do not allow peer-to-peer transactions between Root Ports. ++ * Allow each Root Port to be in a separate IOMMU group by masking ++ * SV/RR/CR/UF bits. 
++ */ ++ return pci_acs_ctrl_enabled(acs_flags, ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + static const struct pci_dev_acs_enabled { +@@ -4585,6 +4622,7 @@ static const struct pci_dev_acs_enabled { + /* I219 */ + { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, ++ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, + /* QCOM QDF2xxx root ports */ + { 0x17cb, 0x400, pci_quirk_qcom_rp_acs }, + { 0x17cb, 0x401, pci_quirk_qcom_rp_acs }, +@@ -4597,9 +4635,30 @@ static const struct pci_dev_acs_enabled { + { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, + /* APM X-Gene */ + { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, ++ /* Ampere Computing */ ++ { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, ++ { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, + { 0 } + }; + ++/* ++ * pci_dev_specific_acs_enabled - check whether device provides ACS controls ++ * @dev: PCI device ++ * @acs_flags: Bitmask of desired ACS controls ++ * ++ * Returns: ++ * -ENOTTY: No quirk applies to this device; we can't tell whether the ++ * device provides the desired controls ++ * 0: Device does not provide all the desired controls ++ * >0: Device provides all the controls in @acs_flags ++ */ + int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) + { + const struct pci_dev_acs_enabled *i; +@@ -4869,13 +4928,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) + } + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); + +-/* FLR may cause some 82579 devices to hang. 
*/ +-static void quirk_intel_no_flr(struct pci_dev *dev) ++/* ++ * FLR may cause the following to devices to hang: ++ * ++ * AMD Starship/Matisse HD Audio Controller 0x1487 ++ * AMD Starship USB 3.0 Host Controller 0x148c ++ * AMD Matisse USB 3.0 Host Controller 0x149c ++ * Intel 82579LM Gigabit Ethernet Controller 0x1502 ++ * Intel 82579V Gigabit Ethernet Controller 0x1503 ++ * ++ */ ++static void quirk_no_flr(struct pci_dev *dev) + { + dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; + } +-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); +-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); + + static void quirk_no_ext_tags(struct pci_dev *pdev) + { +@@ -4912,3 +4983,63 @@ static void quirk_no_ats(struct pci_dev *pdev) + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); + #endif /* CONFIG_PCI_ATS */ ++ ++/* Freescale PCIe doesn't support MSI in RC mode */ ++static void quirk_fsl_no_msi(struct pci_dev *pdev) ++{ ++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) ++ pdev->no_msi = 1; ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); ++ ++/* ++ * Although not allowed by the spec, some multi-function devices have ++ * dependencies of one function (consumer) on another (supplier). For the ++ * consumer to work in D0, the supplier must also be in D0. Create a ++ * device link from the consumer to the supplier to enforce this ++ * dependency. Runtime PM is allowed by default on the consumer to prevent ++ * it from permanently keeping the supplier awake. ++ */ ++static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer, ++ unsigned int supplier, unsigned int class, ++ unsigned int class_shift) ++{ ++ struct pci_dev *supplier_pdev; ++ ++ if (PCI_FUNC(pdev->devfn) != consumer) ++ return; ++ ++ supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), ++ pdev->bus->number, ++ PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier)); ++ if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) { ++ pci_dev_put(supplier_pdev); ++ return; ++ } ++ ++ if (device_link_add(&pdev->dev, &supplier_pdev->dev, ++ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) ++ pci_info(pdev, "D0 power state depends on %s\n", ++ pci_name(supplier_pdev)); ++ else ++ pci_err(pdev, "Cannot enforce power dependency on %s\n", ++ pci_name(supplier_pdev)); ++ ++ pm_runtime_allow(&pdev->dev); ++ pci_dev_put(supplier_pdev); ++} ++ ++/* ++ * Create device link for GPUs with integrated HDA controller for streaming ++ * audio to attached displays. 
++ */ ++static void quirk_gpu_hda(struct pci_dev *hda) ++{ ++ pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16); ++} ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, ++ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID, ++ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, ++ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c +index 1c534d823fd7..6925a3d969e2 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c +@@ -266,6 +266,7 @@ struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; ++ u32 eint_mask; + }; + + /* +@@ -561,10 +562,13 @@ static void exynos_pinctrl_suspend_bank( + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ save->eint_mask = readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); ++ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); + } + + void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +@@ -593,6 +597,9 @@ static void exynos_pinctrl_resume_bank( + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), save->eint_fltcon1); ++ pr_debug("%s: mask %#010x => %#010x\n", bank->name, ++ readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); +@@ -600,6 +607,8 @@ static void exynos_pinctrl_resume_bank( + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ writel(save->eint_mask, regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + } + + void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c +index 06a3c1ef8eee..952544ca0d84 100644 +--- a/drivers/platform/x86/hp-wmi.c ++++ b/drivers/platform/x86/hp-wmi.c +@@ -474,8 +474,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, + static ssize_t als_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- u32 tmp = simple_strtoul(buf, NULL, 10); +- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, ++ u32 tmp; ++ int ret; ++ ++ ret = kstrtou32(buf, 10, &tmp); ++ if (ret) ++ return ret; ++ ++ ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, + sizeof(tmp), sizeof(tmp)); + if (ret) + return ret < 0 ? 
ret : -EINVAL; +diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c +index e9e749f87517..8fb43c4438e6 100644 +--- a/drivers/power/reset/vexpress-poweroff.c ++++ b/drivers/power/reset/vexpress-poweroff.c +@@ -150,6 +150,7 @@ static struct platform_driver vexpress_reset_driver = { + .driver = { + .name = "vexpress-reset", + .of_match_table = vexpress_reset_of_match, ++ .suppress_bind_attrs = true, + }, + }; + +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c +index 0316fae20cfe..7c86a8ee03aa 100644 +--- a/drivers/spi/spi-bcm-qspi.c ++++ b/drivers/spi/spi-bcm-qspi.c +@@ -683,7 +683,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + if (buf) + buf[tp.byte] = read_rxram_slot_u8(qspi, slot); + dev_dbg(&qspi->pdev->dev, "RD %02x\n", +- buf ? buf[tp.byte] : 0xff); ++ buf ? buf[tp.byte] : 0x0); + } else { + u16 *buf = tp.trans->rx_buf; + +@@ -691,7 +691,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + buf[tp.byte / 2] = read_rxram_slot_u16(qspi, + slot); + dev_dbg(&qspi->pdev->dev, "RD %04x\n", +- buf ? buf[tp.byte] : 0xffff); ++ buf ? buf[tp.byte / 2] : 0x0); + } + + update_qspi_trans_byte_count(qspi, &tp, +@@ -746,13 +746,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) + while (!tstatus && slot < MSPI_NUM_CDRAM) { + if (tp.trans->bits_per_word <= 8) { + const u8 *buf = tp.trans->tx_buf; +- u8 val = buf ? buf[tp.byte] : 0xff; ++ u8 val = buf ? buf[tp.byte] : 0x00; + + write_txram_slot_u8(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); + } else { + const u16 *buf = tp.trans->tx_buf; +- u16 val = buf ? buf[tp.byte / 2] : 0xffff; ++ u16 val = buf ? buf[tp.byte / 2] : 0x0000; + + write_txram_slot_u16(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c +index eab27d41ba83..df6abc75bc16 100644 +--- a/drivers/spi/spi-bcm2835.c ++++ b/drivers/spi/spi-bcm2835.c +@@ -793,7 +793,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -813,6 +813,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + /* Clear FIFOs, and disable the HW block */ + bcm2835_wr(bs, BCM2835_SPI_CS, + BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c +index e075712c501e..b7f78e6d9bec 100644 +--- a/drivers/spi/spi-bcm2835aux.c ++++ b/drivers/spi/spi-bcm2835aux.c +@@ -485,7 +485,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -505,6 +505,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835aux_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + bcm2835aux_spi_reset_hw(bs); + + /* disable the HW block by releasing the clock */ +diff --git a/drivers/spi/spi-dw-mid.c 
b/drivers/spi/spi-dw-mid.c +index 837cb8d0bac6..cb268cc4ba2b 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, + if (!xfer->tx_buf) + return NULL; + ++ memset(&txconf, 0, sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = 16; +@@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + if (!xfer->rx_buf) + return NULL; + ++ memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = 16; +@@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + + static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) + { +- u16 dma_ctrl = 0; ++ u16 imr = 0, dma_ctrl = 0; + + dw_writel(dws, DW_SPI_DMARDLR, 0xf); + dw_writel(dws, DW_SPI_DMATDLR, 0x10); + +- if (xfer->tx_buf) ++ if (xfer->tx_buf) { + dma_ctrl |= SPI_DMA_TDMAE; +- if (xfer->rx_buf) ++ imr |= SPI_INT_TXOI; ++ } ++ if (xfer->rx_buf) { + dma_ctrl |= SPI_DMA_RDMAE; ++ imr |= SPI_INT_RXUI | SPI_INT_RXOI; ++ } + dw_writel(dws, DW_SPI_DMACR, dma_ctrl); + + /* Set the interrupt mask */ +- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); ++ spi_umask_intr(dws, imr); + + dws->transfer_handler = dma_transfer; + +@@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) + dma_async_issue_pending(dws->txchan); + } + +- return 0; ++ return 1; + } + + static void mid_spi_dma_stop(struct dw_spi *dws) +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index cbdad3c4930f..d2428a8809c1 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -384,11 +384,8 @@ static int dw_spi_transfer_one(struct spi_master *master, + + spi_enable_chip(dws, 1); + +- if (dws->dma_mapped) { +- ret = dws->dma_ops->dma_transfer(dws, transfer); +- if (ret < 0) +- return ret; +- } ++ if (dws->dma_mapped) ++ return dws->dma_ops->dma_transfer(dws, transfer); + + if (chip->poll_mode) + return poll_transfer(dws); +@@ -499,6 +496,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); + ++ spi_master_set_devdata(master, dws); ++ + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev), + master); + if (ret < 0) { +@@ -529,11 +528,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + dws->dma_inited = 0; + } else { + master->can_dma = dws->dma_ops->can_dma; ++ master->flags |= SPI_CONTROLLER_MUST_TX; + } + } + +- spi_master_set_devdata(master, dws); +- ret = devm_spi_register_master(dev, master); ++ ret = spi_register_master(master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_dma_exit; +@@ -557,6 +556,8 @@ void dw_spi_remove_host(struct dw_spi *dws) + { + dw_spi_debugfs_remove(dws); + ++ spi_unregister_master(dws->master); ++ + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); + +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 5160e16d3a98..1579eb2bc29f 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -156,6 +156,7 @@ static const struct lpss_config lpss_platforms[] = { + .tx_threshold_hi = 48, + .cs_sel_shift = 8, + .cs_sel_mask = 3 << 8, ++ .cs_clk_stays_gated = true, + }, + { /* LPSS_CNL_SSP */ + .offset = 0x200, 
+@@ -1826,7 +1827,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); +- status = devm_spi_register_master(&pdev->dev, master); ++ status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_clock_enabled; +@@ -1856,6 +1857,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) + + pm_runtime_get_sync(&pdev->dev); + ++ spi_unregister_master(drv_data->master); ++ + /* Disable the SSP at the peripheral and SOC level */ + pxa2xx_spi_write(drv_data, SSCR0, 0); + clk_disable_unprepare(ssp->clk); +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 56035637d8f6..49eee894f51d 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -2264,7 +2264,8 @@ void spi_unregister_controller(struct spi_controller *ctlr) + { + struct spi_controller *found; + int id = ctlr->bus_num; +- int dummy; ++ ++ device_for_each_child(&ctlr->dev, NULL, __unregister); + + /* First make sure that this controller was ever added */ + mutex_lock(&board_lock); +@@ -2278,7 +2279,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) + list_del(&ctlr->list); + mutex_unlock(&board_lock); + +- dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); + device_unregister(&ctlr->dev); + /* free bus id */ + mutex_lock(&board_lock); +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c +index babbd94c32d9..33a9777e7a99 100644 +--- a/drivers/staging/android/ion/ion_heap.c ++++ b/drivers/staging/android/ion/ion_heap.c +@@ -105,12 +105,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + + static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) + { +- void *addr = vm_map_ram(pages, num, -1, pgprot); ++ void *addr = vmap(pages, num, VM_MAP, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); +- vm_unmap_ram(addr, num); ++ vunmap(addr); + + return 0; + } +diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c +index 101ca5097fc9..93e2c091c565 100644 +--- a/drivers/staging/greybus/sdio.c ++++ b/drivers/staging/greybus/sdio.c +@@ -412,6 +412,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + struct gb_sdio_command_request request = {0}; + struct gb_sdio_command_response response; + struct mmc_data *data = host->mrq->data; ++ unsigned int timeout_ms; + u8 cmd_flags; + u8 cmd_type; + int i; +@@ -470,9 +471,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + request.data_blksz = cpu_to_le16(data->blksz); + } + +- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, +- &request, sizeof(request), &response, +- sizeof(response)); ++ timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : ++ GB_OPERATION_TIMEOUT_DEFAULT; ++ ++ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, ++ &request, sizeof(request), &response, ++ sizeof(response), timeout_ms); + if (ret < 0) + goto out; + +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 630065b551f5..dfa2db6ed322 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -538,6 +538,11 @@ static void dma_tx_callback(void *data) + + if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port)) + imx_dma_tx(sport); ++ else if (sport->port.rs485.flags & SER_RS485_ENABLED) { ++ temp = readl(sport->port.membase + UCR4); ++ temp |= UCR4_TCEN; ++ writel(temp, sport->port.membase + UCR4); ++ } + + spin_unlock_irqrestore(&sport->port.lock, flags); + } +@@ -555,6 +560,10 @@ static void imx_dma_tx(struct imx_port *sport) + if (sport->dma_is_txing) + return; + ++ temp = readl(sport->port.membase + UCR4); ++ temp &= ~UCR4_TCEN; ++ writel(temp, sport->port.membase + UCR4); ++ + sport->tx_bytes = uart_circ_chars_pending(xmit); + + if (xmit->tail < xmit->head || xmit->head == 0) { +@@ -617,10 +626,15 @@ static void imx_start_tx(struct uart_port *port) + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + imx_stop_rx(port); + +- /* enable transmitter and shifter empty irq */ +- temp = readl(port->membase + UCR4); +- temp |= UCR4_TCEN; +- writel(temp, port->membase + UCR4); ++ /* ++ * Enable transmitter and shifter empty irq only if DMA is off. ++ * In the DMA case this is done in the tx-callback. ++ */ ++ if (!sport->dma_is_enabled) { ++ temp = readl(port->membase + UCR4); ++ temp |= UCR4_TCEN; ++ writel(temp, port->membase + UCR4); ++ } + } + + if (!sport->dma_is_enabled) { +diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c +index d570e19a2864..ffda1d68fb05 100644 +--- a/drivers/video/fbdev/w100fb.c ++++ b/drivers/video/fbdev/w100fb.c +@@ -583,6 +583,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + memsize=par->mach->mem->size; + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); + vfree(par->saved_extmem); ++ par->saved_extmem = NULL; + } + if (par->saved_intmem) { + memsize=MEM_INT_SIZE; +@@ -591,6 +592,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + else + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); + vfree(par->saved_intmem); ++ par->saved_intmem = NULL; + } + } + +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c +index 3099052e1243..0667bc6e7d23 100644 +--- a/drivers/w1/masters/omap_hdq.c ++++ b/drivers/w1/masters/omap_hdq.c +@@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + /* check irqstatus */ + if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { + dev_dbg(hdq_data->dev, "timeout waiting for" +- " TXCOMPLETE/RXCOMPLETE, %x", *status); ++ " TXCOMPLETE/RXCOMPLETE, %x\n", *status); + ret = -ETIMEDOUT; + goto out; + } +@@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + OMAP_HDQ_FLAG_CLEAR, &tmp_status); + if (ret) { + dev_dbg(hdq_data->dev, "timeout waiting GO bit" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + } + + out: +@@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) + spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); + hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); + spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); +- 
dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); ++ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); + + if (hdq_data->hdq_irqstatus & + (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE +@@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + tmp_status = hdq_data->hdq_irqstatus; + /* check irqstatus */ + if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { +- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", ++ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", + tmp_status); + ret = -ETIMEDOUT; + goto out; +@@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + &tmp_status); + if (ret) + dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + + out: + mutex_unlock(&hdq_data->hdq_mutex); +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c +index 58be15c27b6d..62a0c4111dc4 100644 +--- a/drivers/xen/pvcalls-back.c ++++ b/drivers/xen/pvcalls-back.c +@@ -1104,7 +1104,8 @@ static void set_backend_state(struct xenbus_device *dev, + case XenbusStateInitialised: + switch (state) { + case XenbusStateConnected: +- backend_connect(dev); ++ if (backend_connect(dev)) ++ return; + xenbus_switch_state(dev, XenbusStateConnected); + break; + case XenbusStateClosing: +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index 717d82d51bb1..edd5f152e448 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -795,10 +795,12 @@ again: + nritems = btrfs_header_nritems(path->nodes[0]); + if (!nritems || (path->slots[0] >= nritems - 1)) { + ret = btrfs_next_leaf(root, path); +- if (ret == 1) ++ if (ret < 0) { ++ goto out; ++ } else if (ret > 0) { + found_next = 1; +- if (ret != 0) + goto insert; ++ } + slot = path->slots[0]; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 2a196bb134d9..ad138f0b0ce1 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -1139,8 +1139,8 @@ out_unlock: + */ + if (extent_reserved) { + extent_clear_unlock_delalloc(inode, start, +- start + cur_alloc_size, +- start + cur_alloc_size, ++ start + cur_alloc_size - 1, ++ start + cur_alloc_size - 1, + locked_page, + clear_bits, + page_ops); +@@ -8707,7 +8707,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) + + /* bio split */ + ASSERT(map_length <= INT_MAX); +- atomic_inc(&dip->pending_bios); + do { + clone_len = min_t(int, submit_len, map_length); + +@@ -8758,7 +8757,8 @@ submit: + if (!status) + return 0; + +- bio_put(bio); ++ if (bio != orig_bio) ++ bio_put(bio); + out_err: + dip->errors = 1; + /* +@@ -8798,7 +8798,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + bio->bi_private = dip; + dip->orig_bio = bio; + dip->dio_bio = dio_bio; +- atomic_set(&dip->pending_bios, 0); ++ atomic_set(&dip->pending_bios, 1); + io_bio = btrfs_io_bio(bio); + io_bio->logical = file_offset; + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index ca15d65a2070..654ab6e57ec3 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -35,6 +35,7 @@ + #include "btrfs_inode.h" + #include "transaction.h" + #include "compression.h" ++#include "xattr.h" + + /* + * Maximum number of references an extent can have in order for us to attempt to +@@ -4554,6 +4555,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + struct fs_path *p; + struct posix_acl_xattr_header dummy_acl; + ++ /* Capabilities are emitted by 
finish_inode_if_needed */ ++ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) ++ return 0; ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +@@ -5096,6 +5101,64 @@ static int send_extent_data(struct send_ctx *sctx, + return 0; + } + ++/* ++ * Search for a capability xattr related to sctx->cur_ino. If the capability is ++ * found, call send_set_xattr function to emit it. ++ * ++ * Return 0 if there isn't a capability, or when the capability was emitted ++ * successfully, or < 0 if an error occurred. ++ */ ++static int send_capabilities(struct send_ctx *sctx) ++{ ++ struct fs_path *fspath = NULL; ++ struct btrfs_path *path; ++ struct btrfs_dir_item *di; ++ struct extent_buffer *leaf; ++ unsigned long data_ptr; ++ char *buf = NULL; ++ int buf_len; ++ int ret = 0; ++ ++ path = alloc_path_for_send(); ++ if (!path) ++ return -ENOMEM; ++ ++ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, ++ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); ++ if (!di) { ++ /* There is no xattr for this inode */ ++ goto out; ++ } else if (IS_ERR(di)) { ++ ret = PTR_ERR(di); ++ goto out; ++ } ++ ++ leaf = path->nodes[0]; ++ buf_len = btrfs_dir_data_len(leaf, di); ++ ++ fspath = fs_path_alloc(); ++ buf = kmalloc(buf_len, GFP_KERNEL); ++ if (!fspath || !buf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); ++ if (ret < 0) ++ goto out; ++ ++ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); ++ read_extent_buffer(leaf, buf, data_ptr, buf_len); ++ ++ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, ++ strlen(XATTR_NAME_CAPS), buf, buf_len); ++out: ++ kfree(buf); ++ fs_path_free(fspath); ++ btrfs_free_path(path); ++ return ret; ++} ++ + static int clone_range(struct send_ctx *sctx, + struct clone_root *clone_root, + const u64 disk_byte, +@@ -5907,6 +5970,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) + goto out; + } + ++ ret = send_capabilities(sctx); ++ if (ret < 0) ++ goto out; ++ + /* + * If other directory inodes depended on our current directory + * inode's move/rename, now do their move/rename operations. +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h +index a284fb28944b..63291c265aa0 100644 +--- a/fs/ext4/ext4_extents.h ++++ b/fs/ext4/ext4_extents.h +@@ -169,10 +169,13 @@ struct ext4_ext_path { + (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) + #define EXT_LAST_INDEX(__hdr__) \ + (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) +-#define EXT_MAX_EXTENT(__hdr__) \ +- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++#define EXT_MAX_EXTENT(__hdr__) \ ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ ++ : 0) + #define EXT_MAX_INDEX(__hdr__) \ +- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++ ((le16_to_cpu((__hdr__)->eh_max)) ? 
\ ++ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) + + static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) + { +diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c +index 5508baa11bb6..8a28d47bd502 100644 +--- a/fs/ext4/fsync.c ++++ b/fs/ext4/fsync.c +@@ -44,30 +44,28 @@ + */ + static int ext4_sync_parent(struct inode *inode) + { +- struct dentry *dentry = NULL; +- struct inode *next; ++ struct dentry *dentry, *next; + int ret = 0; + + if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) + return 0; +- inode = igrab(inode); ++ dentry = d_find_any_alias(inode); ++ if (!dentry) ++ return 0; + while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); +- dentry = d_find_any_alias(inode); +- if (!dentry) +- break; +- next = igrab(d_inode(dentry->d_parent)); ++ ++ next = dget_parent(dentry); + dput(dentry); +- if (!next) +- break; +- iput(inode); +- inode = next; ++ dentry = next; ++ inode = dentry->d_inode; ++ + /* + * The directory inode may have gone through rmdir by now. But + * the inode itself and its blocks are still allocated (we hold +- * a reference to the inode so it didn't go through +- * ext4_evict_inode()) and so we are safe to flush metadata +- * blocks and the inode. ++ * a reference to the inode via its dentry), so it didn't go ++ * through ext4_evict_inode()) and so we are safe to flush ++ * metadata blocks and the inode. + */ + ret = sync_mapping_buffers(inode->i_mapping); + if (ret) +@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode) + if (ret) + break; + } +- iput(inode); ++ dput(dentry); + return ret; + } + +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index b0873b89dc87..823b01f6b0f0 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1823,8 +1823,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, + if (EXT4_I(inode)->i_file_acl) { + /* The inode already has an extended attribute block. 
*/ + bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); +- if (IS_ERR(bs->bh)) +- return PTR_ERR(bs->bh); ++ if (IS_ERR(bs->bh)) { ++ error = PTR_ERR(bs->bh); ++ bs->bh = NULL; ++ return error; ++ } + ea_bdebug(bs->bh, "b_count=%d, refcount=%d", + atomic_read(&(bs->bh->b_count)), + le32_to_cpu(BHDR(bs->bh)->h_refcount)); +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index 1df023c4c2cc..c41393e30a04 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1512,6 +1512,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, + goto out; + } + ++ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { ++ if (!silent) ++ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); ++ goto out; ++ } ++ + error = 0; + + out: +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index 6398bd8a066e..fe907ff91eeb 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -269,6 +269,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) + if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) + wb_put(wb); + } ++EXPORT_SYMBOL_GPL(__inode_attach_wb); + + /** + * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 50e12956c737..5d1e5832690e 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -2794,6 +2794,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) + if (!nilfs->ns_writer) + return -ENOMEM; + ++ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); ++ + err = nilfs_segctor_start_thread(nilfs->ns_writer); + if (err) { + kfree(nilfs->ns_writer); +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index 321eae740148..b97fc1df6212 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -59,7 +59,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + { + ssize_t list_size, size, value_size = 0; + char *buf, *name, *value = NULL; +- int uninitialized_var(error); ++ int error = 0; + size_t slen; + + if (!(old->d_inode->i_opflags & IOP_XATTR) || +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 225f541f7078..d8e1249adb18 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -432,7 +432,7 @@ const struct inode_operations proc_link_inode_operations = { + + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { +- struct inode *inode = new_inode_pseudo(sb); ++ struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_ino = de->low_ino; +diff --git a/fs/proc/self.c b/fs/proc/self.c +index 31326bb23b8b..398cdf9a9f0c 100644 +--- a/fs/proc/self.c ++++ b/fs/proc/self.c +@@ -41,7 +41,7 @@ int proc_setup_self(struct super_block *s) + inode_lock(root_inode); + self = d_alloc_name(s->s_root, "self"); + if (self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c +index b813e3b529f2..c6cd35e5ef5d 100644 +--- a/fs/proc/thread_self.c ++++ b/fs/proc/thread_self.c +@@ -42,7 +42,7 @@ int proc_setup_thread_self(struct super_block *s) + inode_lock(root_inode); + thread_self = d_alloc_name(s->s_root, "thread-self"); + if (thread_self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = thread_self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git 
a/include/linux/kgdb.h b/include/linux/kgdb.h +index e465bb15912d..6be5545d3584 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -317,7 +317,7 @@ extern void gdbstub_exit(int status); + extern int kgdb_single_step; + extern atomic_t kgdb_active; + #define in_dbg_master() \ +- (raw_smp_processor_id() == atomic_read(&kgdb_active)) ++ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) + extern bool dbg_is_early; + extern void __init dbg_late_init(void); + #else /* ! CONFIG_KGDB */ +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 6f852d5fbada..156940758fc5 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -548,6 +548,7 @@ static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) + } + + extern void kvfree(const void *addr); ++extern void kvfree_sensitive(const void *addr, size_t len); + + /* + * Mapcount of compound page as a whole, does not include mapped sub-pages. +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 7fa3f1498b34..2d036930a3cd 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -45,6 +45,7 @@ + #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 + #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 + #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 ++#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 + #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 + + #define PCI_BASE_CLASS_MEMORY 0x05 +@@ -1331,6 +1332,7 @@ + #define PCI_DEVICE_ID_IMS_TT3D 0x9135 + + #define PCI_VENDOR_ID_AMCC 0x10e8 ++#define PCI_VENDOR_ID_AMPERE 0x1def + + #define PCI_VENDOR_ID_INTERG 0x10ea + #define PCI_DEVICE_ID_INTERG_1682 0x1682 +diff --git a/include/linux/string.h b/include/linux/string.h +index 3d43329c20be..315fef3aff4e 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -238,6 +238,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob + void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); + + #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) ++ ++#ifdef CONFIG_KASAN ++extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); ++extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); ++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); ++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); ++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); ++extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); ++extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); ++extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); ++extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); ++extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); ++#else ++#define __underlying_memchr __builtin_memchr ++#define __underlying_memcmp __builtin_memcmp ++#define __underlying_memcpy __builtin_memcpy ++#define __underlying_memmove __builtin_memmove ++#define __underlying_memset __builtin_memset ++#define __underlying_strcat __builtin_strcat ++#define __underlying_strcpy __builtin_strcpy ++#define __underlying_strlen __builtin_strlen ++#define __underlying_strncat __builtin_strncat ++#define __underlying_strncpy __builtin_strncpy ++#endif ++ + 
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) + { + size_t p_size = __builtin_object_size(p, 0); +@@ -245,14 +270,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_strncpy(p, q, size); ++ return __underlying_strncpy(p, q, size); + } + + __FORTIFY_INLINE char *strcat(char *p, const char *q) + { + size_t p_size = __builtin_object_size(p, 0); + if (p_size == (size_t)-1) +- return __builtin_strcat(p, q); ++ return __underlying_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +@@ -266,7 +291,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) +- return __builtin_strlen(p); ++ return __underlying_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); +@@ -299,7 +324,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); +- __builtin_memcpy(p, q, len); ++ __underlying_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +@@ -312,12 +337,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) +- return __builtin_strncat(p, q, count); ++ return __underlying_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); +- __builtin_memcpy(p + p_len, q, copy_len); ++ __underlying_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; + } +@@ -329,7 +354,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_memset(p, c, size); ++ return __underlying_memset(p, c, size); + } + + __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +@@ -344,7 +369,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memcpy(p, q, size); ++ return __underlying_memcpy(p, q, size); + } + + __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +@@ -359,7 +384,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memmove(p, q, size); ++ return __underlying_memmove(p, q, size); + } + + extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +@@ -385,7 +410,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) + } + if (p_size < size || q_size < size) + fortify_panic(__func__); +- return __builtin_memcmp(p, q, size); ++ return __underlying_memcmp(p, q, size); + } + + __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +@@ -395,7 +420,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); +- return __builtin_memchr(p, c, size); ++ return __underlying_memchr(p, c, size); + } + + void *__real_memchr_inv(const void *s, int c, size_t n) 
__RENAME(memchr_inv); +@@ -426,11 +451,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) +- return __builtin_strcpy(p, q); ++ return __underlying_strcpy(p, q); + memcpy(p, q, strlen(q) + 1); + return p; + } + ++/* Don't use these outside the FORTIFY_SOURCE implementation */ ++#undef __underlying_memchr ++#undef __underlying_memcmp ++#undef __underlying_memcpy ++#undef __underlying_memmove ++#undef __underlying_memset ++#undef __underlying_strcat ++#undef __underlying_strcpy ++#undef __underlying_strlen ++#undef __underlying_strncat ++#undef __underlying_strncpy + #endif + + /** +diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h +index 5ac5db4d295f..566d5f547567 100644 +--- a/include/linux/sunrpc/gss_api.h ++++ b/include/linux/sunrpc/gss_api.h +@@ -83,6 +83,7 @@ struct pf_desc { + u32 service; + char *name; + char *auth_domain_name; ++ struct auth_domain *domain; + bool datatouch; + }; + +diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h +index a4528b26c8aa..d229d27ab19e 100644 +--- a/include/linux/sunrpc/svcauth_gss.h ++++ b/include/linux/sunrpc/svcauth_gss.h +@@ -21,7 +21,8 @@ int gss_svc_init(void); + void gss_svc_shutdown(void); + int gss_svc_init_net(struct net *net); + void gss_svc_shutdown_net(struct net *net); +-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); ++struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, ++ char *name); + u32 svcauth_gss_flavor(struct auth_domain *dom); + + #endif /* __KERNEL__ */ +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index 251e655d407f..ec4f0053d6d8 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -267,7 +267,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); + probe_kernel_read(&retval, addr, sizeof(retval)) + + #ifndef user_access_begin +-#define user_access_begin() do { } while (0) ++#define user_access_begin(type, ptr, len) access_ok(type, ptr, len) + #define user_access_end() do { } while (0) + #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) + #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) +diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h +index 960bedbdec87..77f0f0af3a71 100644 +--- a/include/linux/vga_switcheroo.h ++++ b/include/linux/vga_switcheroo.h +@@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void); + bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); + enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); + +-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); +- + int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); + void vga_switcheroo_fini_domain_pm_ops(struct device *dev); +-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); + #else + + static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} +@@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } + static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } + static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev
*dev) { return VGA_SWITCHEROO_ON; } + +-static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} +- + static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } + static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} +-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } + + #endif + #endif /* _LINUX_VGA_SWITCHEROO_H_ */ +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h +index 926ea701cdc4..5d0bf1688eba 100644 +--- a/include/sound/hdaudio.h ++++ b/include/sound/hdaudio.h +@@ -228,9 +228,6 @@ struct hdac_io_ops { + #define HDA_UNSOL_QUEUE_SIZE 64 + #define HDA_MAX_CODECS 8 /* limit by controller side */ + +-/* HD Audio class code */ +-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 +- + /* + * CORB/RIRB + * +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h +index 27c62abb6c9e..efe8873943f6 100644 +--- a/include/uapi/linux/kvm.h ++++ b/include/uapi/linux/kvm.h +@@ -189,9 +189,11 @@ struct kvm_hyperv_exit { + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/kernel/audit.c b/kernel/audit.c +index aa6d5e39526b..6faaa908544a 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -853,7 +853,7 @@ main_queue: + return 0; + } + +-int audit_send_list(void *_dest) ++int audit_send_list_thread(void *_dest) + { + struct audit_netlink_list *dest = _dest; + struct sk_buff *skb; +@@ -897,19 +897,30 @@ out_kfree_skb: + return NULL; + } + ++static void audit_free_reply(struct audit_reply *reply) ++{ ++ if (!reply) ++ return; ++ ++ if (reply->skb) ++ kfree_skb(reply->skb); ++ if (reply->net) ++ put_net(reply->net); ++ kfree(reply); ++} ++ + static int audit_send_reply_thread(void *arg) + { + struct audit_reply *reply = (struct audit_reply *)arg; +- struct sock *sk = audit_get_sk(reply->net); + + mutex_lock(&audit_cmd_mutex); + mutex_unlock(&audit_cmd_mutex); + + /* Ignore failure. It'll only happen if the sender goes away, + because our timeout is set to infinite. */ +- netlink_unicast(sk, reply->skb, reply->portid, 0); +- put_net(reply->net); +- kfree(reply); ++ netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); ++ reply->skb = NULL; ++ audit_free_reply(reply); + return 0; + } + +@@ -923,35 +934,32 @@ static int audit_send_reply_thread(void *arg) + * @payload: payload data + * @size: payload size + * +- * Allocates an skb, builds the netlink message, and sends it to the port id. +- * No failure notifications. ++ * Allocates a skb, builds the netlink message, and sends it to the port id. 
+ */ + static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, + int multi, const void *payload, int size) + { +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); +- struct sk_buff *skb; + struct task_struct *tsk; +- struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), +- GFP_KERNEL); ++ struct audit_reply *reply; + ++ reply = kzalloc(sizeof(*reply), GFP_KERNEL); + if (!reply) + return; + +- skb = audit_make_reply(seq, type, done, multi, payload, size); +- if (!skb) +- goto out; +- +- reply->net = get_net(net); ++ reply->skb = audit_make_reply(seq, type, done, multi, payload, size); ++ if (!reply->skb) ++ goto err; ++ reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + reply->portid = NETLINK_CB(request_skb).portid; +- reply->skb = skb; + + tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); +- if (!IS_ERR(tsk)) +- return; +- kfree_skb(skb); +-out: +- kfree(reply); ++ if (IS_ERR(tsk)) ++ goto err; ++ ++ return; ++ ++err: ++ audit_free_reply(reply); + } + + /* +diff --git a/kernel/audit.h b/kernel/audit.h +index 9b110ae17ee3..1007773b0b81 100644 +--- a/kernel/audit.h ++++ b/kernel/audit.h +@@ -248,7 +248,7 @@ struct audit_netlink_list { + struct sk_buff_head q; + }; + +-int audit_send_list(void *_dest); ++int audit_send_list_thread(void *_dest); + + extern int selinux_audit_rule_update(void); + +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index 16cf396ea738..f26f4cb5d08d 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -1137,11 +1137,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) + */ + int audit_list_rules_send(struct sk_buff *request_skb, int seq) + { +- u32 portid = NETLINK_CB(request_skb).portid; +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); + struct task_struct *tsk; + struct audit_netlink_list *dest; +- int err = 0; + + /* We can't just spew out the rules here because we might fill + * the available socket buffer space and deadlock waiting for +@@ -1149,25 +1146,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) + * happen if we're actually running in the context of auditctl + * trying to _send_ the stuff */ + +- dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); ++ dest = kmalloc(sizeof(*dest), GFP_KERNEL); + if (!dest) + return -ENOMEM; +- dest->net = get_net(net); +- dest->portid = portid; ++ dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); ++ dest->portid = NETLINK_CB(request_skb).portid; + skb_queue_head_init(&dest->q); + + mutex_lock(&audit_filter_mutex); + audit_list_rules(seq, &dest->q); + mutex_unlock(&audit_filter_mutex); + +- tsk = kthread_run(audit_send_list, dest, "audit_send_list"); ++ tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); + if (IS_ERR(tsk)) { + skb_queue_purge(&dest->q); ++ put_net(dest->net); + kfree(dest); +- err = PTR_ERR(tsk); ++ return PTR_ERR(tsk); + } + +- return err; ++ return 0; + } + + int audit_comparator(u32 left, u32 op, u32 right) +diff --git a/kernel/compat.c b/kernel/compat.c +index 7e83733d4c95..45ae3ace49c2 100644 +--- a/kernel/compat.c ++++ b/kernel/compat.c +@@ -437,10 +437,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); + nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); + +- if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) ++ if (!user_access_begin(VERIFY_READ, umask, bitmap_size / 8)) + return -EFAULT; + +- user_access_begin(); + 
while (nr_compat_longs > 1) { + compat_ulong_t l1, l2; + unsafe_get_user(l1, umask++, Efault); +@@ -467,10 +466,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); + nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); + +- if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) ++ if (!user_access_begin(VERIFY_WRITE, umask, bitmap_size / 8)) + return -EFAULT; + +- user_access_begin(); + while (nr_compat_longs > 1) { + unsigned long m = *mask++; + unsafe_put_user((compat_ulong_t)m, umask++, Efault); +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c +index 67b02e138a47..2ed6351e2a7e 100644 +--- a/kernel/cpu_pm.c ++++ b/kernel/cpu_pm.c +@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); + */ + int cpu_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); +@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); + */ + int cpu_cluster_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index 94aa9ae0007a..159a53ff2716 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -444,6 +444,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) + + if (exception_level > 1) { + dump_stack(); ++ kgdb_io_module_registered = false; + panic("Recursive entry to debugger"); + } + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 5636c9c48545..e50b140053f9 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -94,11 +94,11 @@ static void remote_function(void *data) + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might +- * be on the current CPU, which just calls the function directly ++ * be on the current CPU, which just calls the function directly. This will ++ * retry due to any failures in smp_call_function_single(), such as if the ++ * task_cpu() goes offline concurrently. + * +- * returns: @func return value, or +- * -ESRCH - when the process isn't running +- * -EAGAIN - when the process moved away ++ * returns @func return value or -ESRCH when the process isn't running + */ + static int + task_function_call(struct task_struct *p, remote_function_f func, void *info) +@@ -111,11 +111,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) + }; + int ret; + +- do { +- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); +- if (!ret) +- ret = data.ret; +- } while (ret == -EAGAIN); ++ for (;;) { ++ ret = smp_call_function_single(task_cpu(p), remote_function, ++ &data, 1); ++ ret = !ret ? data.ret : -EAGAIN; ++ ++ if (ret != -EAGAIN) ++ break; ++ ++ cond_resched(); ++ } + + return ret; + } +diff --git a/kernel/exit.c b/kernel/exit.c +index d1baf9c96c3e..7a7984d7a4d8 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -770,8 +770,12 @@ void __noreturn do_exit(long code) + struct task_struct *tsk = current; + int group_dead; + +- profile_task_exit(tsk); +- kcov_task_exit(tsk); ++ /* ++ * We can get here from a kernel oops, sometimes with preemption off. ++ * Start by checking for critical errors. ++ * Then fix up important state like USER_DS and preemption. ++ * Then do everything else. 
++ */ + + WARN_ON(blk_needs_flush_plug(tsk)); + +@@ -789,6 +793,16 @@ void __noreturn do_exit(long code) + */ + set_fs(USER_DS); + ++ if (unlikely(in_atomic())) { ++ pr_info("note: %s[%d] exited with preempt_count %d\n", ++ current->comm, task_pid_nr(current), ++ preempt_count()); ++ preempt_count_set(PREEMPT_ENABLED); ++ } ++ ++ profile_task_exit(tsk); ++ kcov_task_exit(tsk); ++ + ptrace_event(PTRACE_EVENT_EXIT, code); + + validate_creds_for_do_exit(tsk); +@@ -806,13 +820,6 @@ void __noreturn do_exit(long code) + + exit_signals(tsk); /* sets PF_EXITING */ + +- if (unlikely(in_atomic())) { +- pr_info("note: %s[%d] exited with preempt_count %d\n", +- current->comm, task_pid_nr(current), +- preempt_count()); +- preempt_count_set(PREEMPT_ENABLED); +- } +- + /* sync mm's RSS info before statistics gathering */ + if (tsk->mm) + sync_mm_rss(tsk->mm); +@@ -1597,10 +1604,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, + if (!infop) + return err; + +- if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) ++ if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) + return -EFAULT; + +- user_access_begin(); + unsafe_put_user(signo, &infop->si_signo, Efault); + unsafe_put_user(0, &infop->si_errno, Efault); + unsafe_put_user(info.cause, &infop->si_code, Efault); +@@ -1725,10 +1731,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, + if (!infop) + return err; + +- if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) ++ if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) + return -EFAULT; + +- user_access_begin(); + unsafe_put_user(signo, &infop->si_signo, Efault); + unsafe_put_user(0, &infop->si_errno, Efault); + unsafe_put_user(info.cause, &infop->si_code, Efault); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 0b4e997fea1a..4d8add44fffb 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -2643,7 +2643,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) + /* + * We don't care about NUMA placement if we don't have memory. + */ +- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) ++ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) + return; + + /* +diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h +index e01b705556aa..6c5229f98c9e 100644 +--- a/lib/mpi/longlong.h ++++ b/lib/mpi/longlong.h +@@ -671,7 +671,7 @@ do { \ + ************** MIPS/64 ************** + ***************************************/ + #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 +-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 ++#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) + /* + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C + * code below, so we special case MIPS64r6 until the compiler can do better. 
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c +index e304b54c9c7d..fc5b1e2d997d 100644 +--- a/lib/strncpy_from_user.c ++++ b/lib/strncpy_from_user.c +@@ -29,13 +29,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + unsigned long res = 0; + +- /* +- * Truncate 'max' to the user-specified limit, so that +- * we only have one limit we need to check in the loop +- */ +- if (max > count) +- max = count; +- + if (IS_UNALIGNED(src, dst)) + goto byte_at_a_time; + +@@ -113,12 +106,20 @@ long strncpy_from_user(char *dst, const char __user *src, long count) + unsigned long max = max_addr - src_addr; + long retval; + ++ /* ++ * Truncate 'max' to the user-specified limit, so that ++ * we only have one limit we need to check in the loop ++ */ ++ if (max > count) ++ max = count; ++ + kasan_check_write(dst, count); + check_object_size(dst, count, false); +- user_access_begin(); +- retval = do_strncpy_from_user(dst, src, count, max); +- user_access_end(); +- return retval; ++ if (user_access_begin(VERIFY_READ, src, max)) { ++ retval = do_strncpy_from_user(dst, src, count, max); ++ user_access_end(); ++ return retval; ++ } + } + return -EFAULT; + } +diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c +index 184f80f7bacf..0bf7c06ebdad 100644 +--- a/lib/strnlen_user.c ++++ b/lib/strnlen_user.c +@@ -31,13 +31,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, + unsigned long align, res = 0; + unsigned long c; + +- /* +- * Truncate 'max' to the user-specified limit, so that +- * we only have one limit we need to check in the loop +- */ +- if (max > count) +- max = count; +- + /* + * Do everything aligned. But that means that we + * need to also expand the maximum.. +@@ -114,10 +107,18 @@ long strnlen_user(const char __user *str, long count) + unsigned long max = max_addr - src_addr; + long retval; + +- user_access_begin(); +- retval = do_strnlen_user(str, count, max); +- user_access_end(); +- return retval; ++ /* ++ * Truncate 'max' to the user-specified limit, so that ++ * we only have one limit we need to check in the loop ++ */ ++ if (max > count) ++ max = count; ++ ++ if (user_access_begin(VERIFY_READ, str, max)) { ++ retval = do_strnlen_user(str, count, max); ++ user_access_end(); ++ return retval; ++ } + } + return 0; + } +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 283963032eff..9f3d4f84032b 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2193,6 +2193,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + spinlock_t *ptl; + struct mm_struct *mm = vma->vm_mm; + unsigned long haddr = address & HPAGE_PMD_MASK; ++ bool was_locked = false; ++ pmd_t _pmd; + + mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); + ptl = pmd_lock(mm, pmd); +@@ -2202,11 +2204,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + * pmd against. Otherwise we can end up replacing wrong page. 
+ */ + VM_BUG_ON(freeze && !page); +- if (page && page != pmd_page(*pmd)) +- goto out; ++ if (page) { ++ VM_WARN_ON_ONCE(!PageLocked(page)); ++ was_locked = true; ++ if (page != pmd_page(*pmd)) ++ goto out; ++ } + ++repeat: + if (pmd_trans_huge(*pmd)) { +- page = pmd_page(*pmd); ++ if (!page) { ++ page = pmd_page(*pmd); ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } ++ put_page(page); ++ } ++ } + if (PageMlocked(page)) + clear_page_mlock(page); + } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) +@@ -2214,6 +2237,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + __split_huge_pmd_locked(vma, pmd, haddr, freeze); + out: + spin_unlock(ptl); ++ if (!was_locked && page) ++ unlock_page(page); + mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); + } + +diff --git a/mm/slub.c b/mm/slub.c +index 481518c3f61a..8807a0c98a67 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5727,8 +5727,10 @@ static int sysfs_slab_add(struct kmem_cache *s) + + s->kobj.kset = kset; + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); +- if (err) ++ if (err) { ++ kobject_put(&s->kobj); + goto out; ++ } + + err = sysfs_create_group(&s->kobj, &slab_attr_group); + if (err) +diff --git a/mm/util.c b/mm/util.c +index 842ba5fb662e..f0d773c719a1 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -417,6 +417,24 @@ void kvfree(const void *addr) + } + EXPORT_SYMBOL(kvfree); + ++/** ++ * kvfree_sensitive - Free a data object containing sensitive information. ++ * @addr: address of the data object to be freed. ++ * @len: length of the data object. ++ * ++ * Use the special memzero_explicit() function to clear the content of a ++ * kvmalloc'ed object containing sensitive data to make sure that the ++ * compiler won't optimize out the data clearing. 
++ */ ++void kvfree_sensitive(const void *addr, size_t len) ++{ ++ if (likely(!ZERO_OR_NULL_PTR(addr))) { ++ memzero_explicit((void *)addr, len); ++ kvfree(addr); ++ } ++} ++EXPORT_SYMBOL(kvfree_sensitive); ++ + static inline void *__page_rmapping(struct page *page) + { + unsigned long mapping; +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 363dc85bbc5c..56e4ae7d7f63 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -3775,6 +3775,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ ++ case 0x1e: /* Invalid LMP Parameters */ + case 0x1f: /* Unspecified error */ + case 0x20: /* Unsupported LMP Parameter value */ + if (conn->out) { +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 337b43d4c3eb..c183222967d0 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -185,14 +185,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + retv = -EBUSY; + break; + } +- } +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_prot != &tcpv6_prot) { +- retv = -EBUSY; ++ } else if (sk->sk_protocol == IPPROTO_TCP) { ++ if (sk->sk_prot != &tcpv6_prot) { ++ retv = -EBUSY; ++ break; ++ } ++ } else { + break; + } +- if (sk->sk_protocol != IPPROTO_TCP) +- break; ++ + if (sk->sk_state != TCP_ESTABLISHED) { + retv = -ENOTCONN; + break; +diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c +index ed548d06b6dd..a18cceecef88 100644 +--- a/net/netfilter/nft_nat.c ++++ b/net/netfilter/nft_nat.c +@@ -135,7 +135,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + priv->type = NF_NAT_MANIP_DST; + break; + default: +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + if (tb[NFTA_NAT_FAMILY] == NULL) +@@ -202,7 +202,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + return nf_ct_netns_get(ctx->net, family); +diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c +index 5fec3abbe19b..c7d88f979c56 100644 +--- a/net/sunrpc/auth_gss/gss_mech_switch.c ++++ b/net/sunrpc/auth_gss/gss_mech_switch.c +@@ -61,6 +61,8 @@ gss_mech_free(struct gss_api_mech *gm) + + for (i = 0; i < gm->gm_pf_num; i++) { + pf = &gm->gm_pfs[i]; ++ if (pf->domain) ++ auth_domain_put(pf->domain); + kfree(pf->auth_domain_name); + pf->auth_domain_name = NULL; + } +@@ -83,6 +85,7 @@ make_auth_domain_name(char *name) + static int + gss_mech_svc_setup(struct gss_api_mech *gm) + { ++ struct auth_domain *dom; + struct pf_desc *pf; + int i, status; + +@@ -92,10 +95,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) + status = -ENOMEM; + if (pf->auth_domain_name == NULL) + goto out; +- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, +- pf->auth_domain_name); +- if (status) ++ dom = svcauth_gss_register_pseudoflavor( ++ pf->pseudoflavor, pf->auth_domain_name); ++ if (IS_ERR(dom)) { ++ status = PTR_ERR(dom); + goto out; ++ } ++ pf->domain = dom; + } + return 0; + out: +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index a457e7afb768..03043d5221e9 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -779,7 +779,7 @@ u32 
svcauth_gss_flavor(struct auth_domain *dom) + + EXPORT_SYMBOL_GPL(svcauth_gss_flavor); + +-int ++struct auth_domain * + svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + { + struct gss_domain *new; +@@ -796,21 +796,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + new->h.flavour = &svcauthops_gss; + new->pseudoflavor = pseudoflavor; + +- stat = 0; + test = auth_domain_lookup(name, &new->h); +- if (test != &new->h) { /* Duplicate registration */ ++ if (test != &new->h) { ++ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", ++ name); ++ stat = -EADDRINUSE; + auth_domain_put(test); +- kfree(new->h.name); +- goto out_free_dom; ++ goto out_free_name; + } +- return 0; ++ return test; + ++out_free_name: ++ kfree(new->h.name); + out_free_dom: + kfree(new); + out: +- return stat; ++ return ERR_PTR(stat); + } +- + EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); + + static inline int +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index 73791d22ae07..d5843cfa83e7 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -240,7 +240,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + + /* Portable EVM signatures must include an IMA hash */ + if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) +- return -EPERM; ++ error = -EPERM; + out: + kfree(xattr_value); + kfree(desc); +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index d52b487ad259..186a3158edef 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -40,7 +40,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; + #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE + #define IMA_EVENT_NAME_LEN_MAX 255 + +-#define IMA_HASH_BITS 9 ++#define IMA_HASH_BITS 10 + #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) + + #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +@@ -167,9 +167,10 @@ struct ima_h_table { + }; + extern struct ima_h_table ima_htable; + +-static inline unsigned long ima_hash_key(u8 *digest) ++static inline unsigned int ima_hash_key(u8 *digest) + { +- return hash_long(*digest, IMA_HASH_BITS); ++ /* there is no point in taking a hash of part of a digest */ ++ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; + } + + #define __ima_hooks(hook) \ +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c +index 4c160bee67f7..46b0bd6b3d62 100644 +--- a/security/integrity/ima/ima_policy.c ++++ b/security/integrity/ima/ima_policy.c +@@ -170,7 +170,7 @@ static struct ima_rule_entry secure_boot_rules[] __ro_after_init = { + static LIST_HEAD(ima_default_rules); + static LIST_HEAD(ima_policy_rules); + static LIST_HEAD(ima_temp_rules); +-static struct list_head *ima_rules; ++static struct list_head *ima_rules = &ima_default_rules; + + static int ima_policy __initdata; + +@@ -468,7 +468,6 @@ void __init ima_init_policy(void) + temp_ima_appraise |= IMA_APPRAISE_POLICY; + } + +- ima_rules = &ima_default_rules; + ima_update_policy_flag(); + } + +diff --git a/security/keys/internal.h b/security/keys/internal.h +index 124273e500cf..d479ca71137e 100644 +--- a/security/keys/internal.h ++++ b/security/keys/internal.h +@@ -306,15 +306,4 @@ static inline void key_check(const struct key *key) + #define key_check(key) do {} while(0) + + #endif +- +-/* +- * Helper function to clear and free a kvmalloc'ed memory object. 
+- */ +-static inline void __kvzfree(const void *addr, size_t len) +-{ +- if (addr) { +- memset((void *)addr, 0, len); +- kvfree(addr); +- } +-} + #endif /* _INTERNAL_H */ +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index c07c2e2b2478..9394d72a77e8 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -133,10 +133,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, + + key_ref_put(keyring_ref); + error3: +- if (payload) { +- memzero_explicit(payload, plen); +- kvfree(payload); +- } ++ kvfree_sensitive(payload, plen); + error2: + kfree(description); + error: +@@ -351,7 +348,7 @@ long keyctl_update_key(key_serial_t id, + + key_ref_put(key_ref); + error2: +- __kvzfree(payload, plen); ++ kvfree_sensitive(payload, plen); + error: + return ret; + } +@@ -859,7 +856,7 @@ can_read_key: + */ + if (ret > key_data_len) { + if (unlikely(key_data)) +- __kvzfree(key_data, key_data_len); ++ kvfree_sensitive(key_data, key_data_len); + key_data_len = ret; + continue; /* Allocate buffer */ + } +@@ -868,7 +865,7 @@ can_read_key: + ret = -EFAULT; + break; + } +- __kvzfree(key_data, key_data_len); ++ kvfree_sensitive(key_data, key_data_len); + + key_put_out: + key_put(key); +@@ -1170,10 +1167,7 @@ long keyctl_instantiate_key_common(key_serial_t id, + keyctl_change_reqkey_auth(NULL); + + error2: +- if (payload) { +- memzero_explicit(payload, plen); +- kvfree(payload); +- } ++ kvfree_sensitive(payload, plen); + error: + return ret; + } +diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c +index f6482e53d55a..371ae368da35 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -906,11 +906,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + else + rule += strlen(skp->smk_known) + 1; + ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &maplevel); + if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + goto out; + + rule += SMK_DIGITLEN; ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &catlen); + if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) + goto out; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 14b1ee29509d..071e09c3d855 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1950,6 +1950,11 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) + } + pcm_file = f.file->private_data; + substream1 = pcm_file->substream; ++ if (substream == substream1) { ++ res = -EINVAL; ++ goto _badf; ++ } ++ + group = kmalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + res = -ENOMEM; +diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c +index a826c138e7f5..8a58ed168756 100644 +--- a/sound/isa/es1688/es1688.c ++++ b/sound/isa/es1688/es1688.c +@@ -284,8 +284,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, + return error; + } + error = snd_es1688_probe(card, dev); +- if (error < 0) ++ if (error < 0) { ++ snd_card_free(card); + return error; ++ } + pnp_set_card_drvdata(pcard, card); + snd_es968_pnp_is_probed = 1; + return 0; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 7779f5460715..e399c5718ee6 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1282,6 +1282,7 @@ static void azx_vs_set_state(struct pci_dev *pci, + struct snd_card *card = pci_get_drvdata(pci); + struct azx *chip = card->private_data; + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); ++ struct 
hda_codec *codec; + bool disabled; + + wait_for_completion(&hda->probe_wait); +@@ -1306,8 +1307,12 @@ static void azx_vs_set_state(struct pci_dev *pci, + dev_info(chip->card->dev, "%s via vga_switcheroo\n", + disabled ? "Disabling" : "Enabling"); + if (disabled) { +- pm_runtime_put_sync_suspend(card->dev); +- azx_suspend(card->dev); ++ list_for_each_codec(codec, &chip->bus) { ++ pm_runtime_suspend(hda_codec_dev(codec)); ++ pm_runtime_disable(hda_codec_dev(codec)); ++ } ++ pm_runtime_suspend(card->dev); ++ pm_runtime_disable(card->dev); + /* when we get suspended by vga_switcheroo we end up in D3cold, + * however we have no ACPI handle, so pci/acpi can't put us there, + * put ourselves there */ +@@ -1318,9 +1323,12 @@ static void azx_vs_set_state(struct pci_dev *pci, + "Cannot lock devices!\n"); + } else { + snd_hda_unlock_devices(&chip->bus); +- pm_runtime_get_noresume(card->dev); + chip->disabled = false; +- azx_resume(card->dev); ++ pm_runtime_enable(card->dev); ++ list_for_each_codec(codec, &chip->bus) { ++ pm_runtime_enable(hda_codec_dev(codec)); ++ pm_runtime_resume(hda_codec_dev(codec)); ++ } + } + } + } +@@ -1350,6 +1358,7 @@ static void init_vga_switcheroo(struct azx *chip) + dev_info(chip->card->dev, + "Handle vga_switcheroo audio client\n"); + hda->use_vga_switcheroo = 1; ++ chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; + pci_dev_put(p); + } + } +@@ -1375,9 +1384,6 @@ static int register_vga_switcheroo(struct azx *chip) + return err; + hda->vga_switcheroo_registered = 1; + +- /* register as an optimus hdmi audio power domain */ +- vga_switcheroo_init_domain_pm_optimus_hdmi_audio(chip->card->dev, +- &hda->hdmi_pm_domain); + return 0; + } + #else +@@ -1406,10 +1412,8 @@ static int azx_free(struct azx *chip) + if (use_vga_switcheroo(hda)) { + if (chip->disabled && hda->probe_continued) + snd_hda_unlock_devices(&chip->bus); +- if (hda->vga_switcheroo_registered) { ++ if (hda->vga_switcheroo_registered) + vga_switcheroo_unregister_client(chip->pci); +- vga_switcheroo_fini_domain_pm_ops(chip->card->dev); +- } + } + + if (bus->chip_init) { +@@ -2301,6 +2305,7 @@ static int azx_probe_continue(struct azx *chip) + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + struct hdac_bus *bus = azx_bus(chip); + struct pci_dev *pci = chip->pci; ++ struct hda_codec *codec; + int dev = chip->dev_index; + int val; + int err; +@@ -2385,6 +2390,14 @@ static int azx_probe_continue(struct azx *chip) + chip->running = 1; + azx_add_card_list(chip); + ++ /* ++ * The discrete GPU cannot power down unless the HDA controller runtime ++ * suspends, so activate runtime PM on codecs even if power_save == 0. 
++ */ ++ if (use_vga_switcheroo(hda)) ++ list_for_each_codec(codec, &chip->bus) ++ codec->auto_runtime_pm = 1; ++ + val = power_save; + #ifdef CONFIG_PM + if (pm_blacklist) { +@@ -2399,7 +2412,7 @@ static int azx_probe_continue(struct azx *chip) + } + #endif /* CONFIG_PM */ + snd_hda_set_power_save(&chip->bus, val * 1000); +- if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) ++ if (azx_has_pm_runtime(chip)) + pm_runtime_put_autosuspend(&pci->dev); + + out_free: +diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h +index ff0c4d617bc1..e3a3d318d2e5 100644 +--- a/sound/pci/hda/hda_intel.h ++++ b/sound/pci/hda/hda_intel.h +@@ -40,9 +40,6 @@ struct hda_intel { + unsigned int vga_switcheroo_registered:1; + unsigned int init_failed:1; /* delayed init failed */ + +- /* secondary power domain for hdmi audio under vga device */ +- struct dev_pm_domain hdmi_pm_domain; +- + bool need_i915_power:1; /* the hda controller needs i915 power */ + }; + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 5be3c926364e..98110fd65b9b 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -7131,6 +7131,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, + {0x17, 0x90170110}), ++ SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, ++ {0x14, 0x01014010}, ++ {0x17, 0x90170120}, ++ {0x18, 0x02a11030}, ++ {0x19, 0x02a1103f}, ++ {0x21, 0x0221101f}), + {} + }; + +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 4169c71f8a32..721f91f5766d 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -768,9 +768,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + if (chip == (void *)-1L) + return 0; + +- chip->autosuspended = !!PMSG_IS_AUTO(message); +- if (!chip->autosuspended) +- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + if (!chip->num_suspended_intf++) { + list_for_each_entry(as, &chip->pcm_list, list) { + snd_pcm_suspend_all(as->pcm); +@@ -783,6 +780,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + snd_usb_mixer_suspend(mixer); + } + ++ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { ++ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); ++ chip->system_suspend = chip->num_suspended_intf; ++ } ++ + return 0; + } + +@@ -795,10 +797,11 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + + if (chip == (void *)-1L) + return 0; +- if (--chip->num_suspended_intf) +- return 0; + + atomic_inc(&chip->active); /* avoid autopm */ ++ if (chip->num_suspended_intf > 1) ++ goto out; ++ + /* + * ALSA leaves material resumption to user space + * we just notify and restart the mixers +@@ -813,9 +816,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + snd_usbmidi_resume(p); + } + +- if (!chip->autosuspended) ++ out: ++ if (chip->num_suspended_intf == chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); +- chip->autosuspended = 0; ++ chip->system_suspend = 0; ++ } ++ chip->num_suspended_intf--; + + err_out: + atomic_dec(&chip->active); /* allow autopm after this point */ +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index 4d5c89a7ba2b..f4ee83c8e0b2 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -37,7 +37,7 @@ struct snd_usb_audio { + struct usb_interface *pm_intf; + u32 usb_id; + struct mutex mutex; +- unsigned int autosuspended:1; ++ 
unsigned int system_suspend; + atomic_t active; + atomic_t shutdown; + atomic_t usage_count; +diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c +index 45b50b89009a..c61841051a90 100644 +--- a/tools/lib/api/fs/fs.c ++++ b/tools/lib/api/fs/fs.c +@@ -90,6 +90,7 @@ struct fs { + const char * const *mounts; + char path[PATH_MAX]; + bool found; ++ bool checked; + long magic; + }; + +@@ -111,31 +112,37 @@ static struct fs fs__entries[] = { + .name = "sysfs", + .mounts = sysfs__fs_known_mountpoints, + .magic = SYSFS_MAGIC, ++ .checked = false, + }, + [FS__PROCFS] = { + .name = "proc", + .mounts = procfs__known_mountpoints, + .magic = PROC_SUPER_MAGIC, ++ .checked = false, + }, + [FS__DEBUGFS] = { + .name = "debugfs", + .mounts = debugfs__known_mountpoints, + .magic = DEBUGFS_MAGIC, ++ .checked = false, + }, + [FS__TRACEFS] = { + .name = "tracefs", + .mounts = tracefs__known_mountpoints, + .magic = TRACEFS_MAGIC, ++ .checked = false, + }, + [FS__HUGETLBFS] = { + .name = "hugetlbfs", + .mounts = hugetlbfs__known_mountpoints, + .magic = HUGETLBFS_MAGIC, ++ .checked = false, + }, + [FS__BPF_FS] = { + .name = "bpf", + .mounts = bpf_fs__known_mountpoints, + .magic = BPF_FS_MAGIC, ++ .checked = false, + }, + }; + +@@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs) + } + + fclose(fp); ++ fs->checked = true; + return fs->found = found; + } + +@@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs) + return false; + + fs->found = true; ++ fs->checked = true; + strncpy(fs->path, override_path, sizeof(fs->path) - 1); + fs->path[sizeof(fs->path) - 1] = '\0'; + return true; +@@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx) + if (fs->found) + return (const char *)fs->path; + ++ /* The mount point was already checked for this filesystem ++ * but did not exist, so return NULL to avoid scanning again. ++ * This makes the found and not-found paths cost equivalent ++ * in case of multiple calls. ++ */ ++ if (fs->checked) ++ return NULL; ++ + return fs__get_mountpoint(fs); + } + +diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h +index dda49deefb52..57a3dc160b08 100644 +--- a/tools/lib/api/fs/fs.h ++++ b/tools/lib/api/fs/fs.h +@@ -18,6 +18,18 @@ + const char *name##__mount(void); \ + bool name##__configured(void); \ + ++/* ++ * The xxxx__mountpoint() entry points find the first matching mount point for each ++ * filesystem listed below, where xxxx is the filesystem type. ++ * ++ * The interface is as follows: ++ * ++ * - If a mount point is found on first call, it is cached and used for all ++ * subsequent calls. ++ * ++ * - If a mount point is not found, NULL is returned on first call and all ++ * subsequent calls.
++ */ + FS(sysfs) + FS(procfs) + FS(debugfs) +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index 5685fe2c7a7d..247fbb5f6a38 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -778,6 +778,12 @@ static int add_special_section_alts(struct objtool_file *file) + } + + if (special_alt->group) { ++ if (!special_alt->orig_len) { ++ WARN_FUNC("empty alternative entry", ++ orig_insn->sec, orig_insn->offset); ++ continue; ++ } ++ + ret = handle_group_alt(file, special_alt, orig_insn, + &new_insn); + if (ret) +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c +index e1ac51aaedcf..48d40b12d581 100644 +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -377,6 +377,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) + + for (k = 0; k < pev->ntevs; k++) { + struct probe_trace_event *tev = &pev->tevs[k]; ++ /* Skipped events have no event name */ ++ if (!tev->event) ++ continue; + + /* We use tev's name for showing new events */ + show_perf_probe_event(tev->group, tev->event, pev, +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c +index 505c13bf7e30..40b5f656ebc3 100644 +--- a/tools/perf/util/dso.c ++++ b/tools/perf/util/dso.c +@@ -36,6 +36,7 @@ char dso__symtab_origin(const struct dso *dso) + [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', ++ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', + [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', +@@ -118,6 +119,21 @@ int dso__read_binary_type_filename(const struct dso *dso, + snprintf(filename + len, size - len, "%s", dso->long_name); + break; + ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: ++ /* ++ * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in ++ * /usr/lib/debug/lib when it is expected to be in ++ * /usr/lib/debug/usr/lib ++ */ ++ if (strlen(dso->long_name) < 9 || ++ strncmp(dso->long_name, "/usr/lib/", 9)) { ++ ret = -1; ++ break; ++ } ++ len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); ++ snprintf(filename + len, size - len, "%s", dso->long_name + 4); ++ break; ++ + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + { + const char *last_slash; +diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h +index 926ff2e7f668..ea198d19dadd 100644 +--- a/tools/perf/util/dso.h ++++ b/tools/perf/util/dso.h +@@ -25,6 +25,7 @@ enum dso_binary_type { + DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index 6670e12a2bb3..7c286756c34b 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -122,7 +122,7 @@ static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) + return machine__find_kernel_function(host_machine, addr, mapp); + } + +-static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) ++static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap) + { + /* kmap->ref_reloc_sym should be set if host_machine is initialized */ + struct kmap *kmap; +@@ -134,6 +134,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) + kmap = map__kmap(map); + if (!kmap) + return NULL; ++ ++ if (pmap) 
++ *pmap = map; ++ + return kmap->ref_reloc_sym; + } + +@@ -145,7 +149,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, + struct map *map; + + /* ref_reloc_sym is just a label. Need a special fix*/ +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (reloc_sym && strcmp(name, reloc_sym->name) == 0) + *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; + else { +@@ -764,6 +768,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + int ntevs) + { + struct ref_reloc_sym *reloc_sym; ++ struct map *map; + char *tmp; + int i, skipped = 0; + +@@ -772,7 +777,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + return post_process_offline_probe_trace_events(tevs, ntevs, + symbol_conf.vmlinux_name); + +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(&map); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + return -EINVAL; +@@ -783,9 +788,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + continue; + if (tevs[i].point.retprobe && !kretprobe_offset_is_supported()) + continue; +- /* If we found a wrong one, mark it by NULL symbol */ ++ /* ++ * If we found a wrong one, mark it by NULL symbol. ++ * Since addresses in debuginfo is same as objdump, we need ++ * to convert it to addresses on memory. ++ */ + if (kprobe_warn_out_range(tevs[i].point.symbol, +- tevs[i].point.address)) { ++ map__objdump_2mem(map, tevs[i].point.address))) { + tmp = NULL; + skipped++; + } else { +@@ -1762,8 +1771,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) + fmt1_str = strtok_r(argv0_str, ":", &fmt); + fmt2_str = strtok_r(NULL, "/", &fmt); + fmt3_str = strtok_r(NULL, " \t", &fmt); +- if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL +- || fmt3_str == NULL) { ++ if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) { + semantic_error("Failed to parse event name: %s\n", argv[0]); + ret = -EINVAL; + goto out; +@@ -2888,7 +2896,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + /* Note that the symbols in the kmodule are not relocated */ + if (!pev->uprobes && !pev->target && + (!pp->retprobe || kretprobe_offset_is_supported())) { +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + ret = -EINVAL; +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c +index ae0feea4e8b5..8f7f9d05f38c 100644 +--- a/tools/perf/util/probe-finder.c ++++ b/tools/perf/util/probe-finder.c +@@ -114,6 +114,7 @@ enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index 27bffcb213eb..dea6f15af485 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -64,6 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +@@ -1412,6 +1413,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + case 
DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; +diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c +index dd4162fc0419..7a573fb4c1c4 100644 +--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c ++++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c +@@ -114,6 +114,7 @@ static struct option long_options[] = { + { "tcp", no_argument, 0, 't' }, + { "udp", no_argument, 0, 'u' }, + { "ip", no_argument, 0, 'i' }, ++ { NULL, 0, NULL, 0 }, + }; + + static int next_port = 19999; diff --git a/patch/kernel/odroidxu4-legacy/patch-4.14.185-186.patch b/patch/kernel/odroidxu4-legacy/patch-4.14.185-186.patch new file mode 100644 index 000000000..5144cc9ad --- /dev/null +++ b/patch/kernel/odroidxu4-legacy/patch-4.14.185-186.patch @@ -0,0 +1,4619 @@ +diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst +index 2a5191b6d445..15449334b124 100644 +--- a/Documentation/driver-api/mtdnand.rst ++++ b/Documentation/driver-api/mtdnand.rst +@@ -277,7 +277,7 @@ unregisters the partitions in the MTD layer. + static void __exit board_cleanup (void) + { + /* Release resources, unregister device */ +- nand_release (board_mtd); ++ nand_release (mtd_to_nand(board_mtd)); + + /* unmap physical address */ + iounmap(baseaddr); +diff --git a/Makefile b/Makefile +index 04d63a6b4f46..5152fefccab5 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 185 ++SUBLEVEL = 186 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig +index cefe44f6889b..ba124f8704fa 100644 +--- a/arch/arm/mach-integrator/Kconfig ++++ b/arch/arm/mach-integrator/Kconfig +@@ -3,6 +3,8 @@ menuconfig ARCH_INTEGRATOR + depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 + select ARM_AMBA + select COMMON_CLK_VERSATILE ++ select CMA ++ select DMA_CMA + select HAVE_TCM + select ICST + select MFD_SYSCON +@@ -34,14 +36,13 @@ config INTEGRATOR_IMPD1 + select ARM_VIC + select GPIO_PL061 + select GPIOLIB ++ select REGULATOR ++ select REGULATOR_FIXED_VOLTAGE + help + The IM-PD1 is an add-on logic module for the Integrator which + allows ARM(R) Ltd PrimeCells to be developed and evaluated. + The IM-PD1 can be found on the Integrator/PP2 platform. + +- To compile this driver as a module, choose M here: the +- module will be called impd1. 
+- + config INTEGRATOR_CM7TDMI + bool "Integrator/CM7TDMI core module" + depends on ARCH_INTEGRATOR_AP +diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c +index 95697a9c1245..6e96cea99a4e 100644 +--- a/arch/arm64/kernel/hw_breakpoint.c ++++ b/arch/arm64/kernel/hw_breakpoint.c +@@ -738,6 +738,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, + return 0; + } + ++static int watchpoint_report(struct perf_event *wp, unsigned long addr, ++ struct pt_regs *regs) ++{ ++ int step = is_default_overflow_handler(wp); ++ struct arch_hw_breakpoint *info = counter_arch_bp(wp); ++ ++ info->trigger = addr; ++ ++ /* ++ * If we triggered a user watchpoint from a uaccess routine, then ++ * handle the stepping ourselves since userspace really can't help ++ * us with this. ++ */ ++ if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) ++ step = 1; ++ else ++ perf_bp_event(wp, regs); ++ ++ return step; ++} ++ + static int watchpoint_handler(unsigned long addr, unsigned int esr, + struct pt_regs *regs) + { +@@ -747,7 +768,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, + u64 val; + struct perf_event *wp, **slots; + struct debug_info *debug_info; +- struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = this_cpu_ptr(wp_on_reg); +@@ -785,25 +805,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, + if (dist != 0) + continue; + +- info = counter_arch_bp(wp); +- info->trigger = addr; +- perf_bp_event(wp, regs); +- +- /* Do we need to handle the stepping? */ +- if (is_default_overflow_handler(wp)) +- step = 1; ++ step = watchpoint_report(wp, addr, regs); + } +- if (min_dist > 0 && min_dist != -1) { +- /* No exact match found. */ +- wp = slots[closest_match]; +- info = counter_arch_bp(wp); +- info->trigger = addr; +- perf_bp_event(wp, regs); + +- /* Do we need to handle the stepping? */ +- if (is_default_overflow_handler(wp)) +- step = 1; +- } ++ /* No exact match found? 
*/ ++ if (min_dist > 0 && min_dist != -1) ++ step = watchpoint_report(slots[closest_match], addr, regs); ++ + rcu_read_unlock(); + + if (!step) +diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c +index 3097fa2ca746..1e428d18d268 100644 +--- a/arch/m68k/coldfire/pci.c ++++ b/arch/m68k/coldfire/pci.c +@@ -316,8 +316,10 @@ static int __init mcf_pci_init(void) + + /* Keep a virtual mapping to IO/config space active */ + iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); +- if (iospace == 0) ++ if (iospace == 0) { ++ pci_free_host_bridge(bridge); + return -ENODEV; ++ } + pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", + (u32) iospace); + +diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S +index 1107d34e45bf..0fdfa7142f4b 100644 +--- a/arch/openrisc/kernel/entry.S ++++ b/arch/openrisc/kernel/entry.S +@@ -1102,13 +1102,13 @@ ENTRY(__sys_clone) + l.movhi r29,hi(sys_clone) + l.ori r29,r29,lo(sys_clone) + l.j _fork_save_extra_regs_and_call +- l.addi r7,r1,0 ++ l.nop + + ENTRY(__sys_fork) + l.movhi r29,hi(sys_fork) + l.ori r29,r29,lo(sys_fork) + l.j _fork_save_extra_regs_and_call +- l.addi r3,r1,0 ++ l.nop + + ENTRY(sys_rt_sigreturn) + l.jal _sys_rt_sigreturn +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h +index bcb79a96a6c8..618ee2c0ed53 100644 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h +@@ -898,10 +898,25 @@ extern struct page *pgd_page(pgd_t pgd); + #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) + #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) + +-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) +-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) +-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) +-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) ++static inline unsigned long pgd_index(unsigned long address) ++{ ++ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); ++} ++ ++static inline unsigned long pud_index(unsigned long address) ++{ ++ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); ++} ++ ++static inline unsigned long pmd_index(unsigned long address) ++{ ++ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++} ++ ++static inline unsigned long pte_index(unsigned long address) ++{ ++ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); ++} + + /* + * Find an entry in a page-table-directory. We combine the address region +diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c +index 9dafd7af39b8..cb4d6cd949fc 100644 +--- a/arch/powerpc/kernel/machine_kexec.c ++++ b/arch/powerpc/kernel/machine_kexec.c +@@ -113,11 +113,12 @@ void machine_kexec(struct kimage *image) + + void __init reserve_crashkernel(void) + { +- unsigned long long crash_size, crash_base; ++ unsigned long long crash_size, crash_base, total_mem_sz; + int ret; + ++ total_mem_sz = memory_limit ? 
memory_limit : memblock_phys_mem_size(); + /* use common parsing */ +- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), ++ ret = parse_crashkernel(boot_command_line, total_mem_sz, + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + crashk_res.start = crash_base; +@@ -176,6 +177,7 @@ void __init reserve_crashkernel(void) + /* Crash kernel trumps memory limit */ + if (memory_limit && memory_limit <= crashk_res.end) { + memory_limit = crashk_res.end + 1; ++ total_mem_sz = memory_limit; + printk("Adjusted memory limit for crashkernel, now 0x%llx\n", + memory_limit); + } +@@ -184,7 +186,7 @@ void __init reserve_crashkernel(void) + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start >> 20), +- (unsigned long)(memblock_phys_mem_size() >> 20)); ++ (unsigned long)(total_mem_sz >> 20)); + + if (!memblock_is_region_memory(crashk_res.start, crash_size) || + memblock_reserve(crashk_res.start, crash_size)) { +diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c +index 72238eedc360..2bb798918483 100644 +--- a/arch/powerpc/perf/hv-24x7.c ++++ b/arch/powerpc/perf/hv-24x7.c +@@ -1413,16 +1413,6 @@ static void h_24x7_event_read(struct perf_event *event) + h24x7hw = &get_cpu_var(hv_24x7_hw); + h24x7hw->events[i] = event; + put_cpu_var(h24x7hw); +- /* +- * Clear the event count so we can compute the _change_ +- * in the 24x7 raw counter value at the end of the txn. +- * +- * Note that we could alternatively read the 24x7 value +- * now and save its value in event->hw.prev_count. But +- * that would require issuing a hcall, which would then +- * defeat the purpose of using the txn interface. +- */ +- local64_set(&event->count, 0); + } + + put_cpu_var(hv_24x7_reqb); +diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c +index 73e6b36bcd51..256943af58aa 100644 +--- a/arch/powerpc/platforms/4xx/pci.c ++++ b/arch/powerpc/platforms/4xx/pci.c +@@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) + if (mbase == NULL) { + printk(KERN_ERR "%pOF: Can't map internal config space !", + port->node); +- goto done; ++ return; + } + + while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) +@@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) + } + if (attempt) + port->link = 1; +-done: + iounmap(mbase); +- + } + + static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { +diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c +index b0f34663b1ae..19bae78b1f25 100644 +--- a/arch/powerpc/platforms/ps3/mm.c ++++ b/arch/powerpc/platforms/ps3/mm.c +@@ -212,13 +212,14 @@ void ps3_mm_vas_destroy(void) + { + int result; + +- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); +- + if (map.vas_id) { + result = lv1_select_virtual_address_space(0); +- BUG_ON(result); +- result = lv1_destruct_virtual_address_space(map.vas_id); +- BUG_ON(result); ++ result += lv1_destruct_virtual_address_space(map.vas_id); ++ ++ if (result) { ++ lv1_panic(0); ++ } ++ + map.vas_id = 0; + } + } +@@ -316,19 +317,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) + int result; + + if (!r->destroy) { +- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", +- __func__, __LINE__, r->base, r->size); + return; + } + +- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); +- + if (r->base) { + result = lv1_release_memory(r->base); +- BUG_ON(result); ++ ++ 
if (result) { ++ lv1_panic(0); ++ } ++ + r->size = r->base = r->offset = 0; + map.total = map.rm.size; + } ++ + ps3_mm_set_repository_highmem(NULL); + } + +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c +index 99d1152ae224..5ec935521204 100644 +--- a/arch/powerpc/platforms/pseries/ras.c ++++ b/arch/powerpc/platforms/pseries/ras.c +@@ -325,10 +325,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) + /* + * Some versions of FWNMI place the buffer inside the 4kB page starting at + * 0x7000. Other versions place it inside the rtas buffer. We check both. ++ * Minimum size of the buffer is 16 bytes. + */ + #define VALID_FWNMI_BUFFER(A) \ +- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ +- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) ++ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ ++ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) + + /* + * Get the error information for errors coming through the +diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h +index 6bc941be6921..166fbd74e316 100644 +--- a/arch/s390/include/asm/syscall.h ++++ b/arch/s390/include/asm/syscall.h +@@ -41,7 +41,17 @@ static inline void syscall_rollback(struct task_struct *task, + static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) + { +- return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; ++ unsigned long error = regs->gprs[2]; ++#ifdef CONFIG_COMPAT ++ if (test_tsk_thread_flag(task, TIF_31BIT)) { ++ /* ++ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO ++ * and will match correctly in comparisons. ++ */ ++ error = (long)(int)error; ++ } ++#endif ++ return IS_ERR_VALUE(error) ? error : 0; + } + + static inline long syscall_get_return_value(struct task_struct *task, +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index 1c060748c813..f38ffcc610d2 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE + + SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) + +-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' ++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' + + quiet_cmd_zoffset = ZOFFSET $@ + cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 48ab5fdd1044..ee33f0951322 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1915,7 +1915,7 @@ void __init init_apic_mappings(void) + unsigned int new_apicid; + + if (apic_validate_deadline_timer()) +- pr_debug("TSC deadline timer available\n"); ++ pr_info("TSC deadline timer available\n"); + + if (x2apic_mode) { + boot_cpu_physical_apicid = read_apic_id(); +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 9d7bb8de2917..02665ffef050 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -744,16 +744,11 @@ asm( + NOKPROBE_SYMBOL(kretprobe_trampoline); + STACK_FRAME_NON_STANDARD(kretprobe_trampoline); + +-static struct kprobe kretprobe_kprobe = { +- .addr = (void *)kretprobe_trampoline, +-}; +- + /* + * Called from kretprobe_trampoline + */ + __visible __used void 
*trampoline_handler(struct pt_regs *regs) + { +- struct kprobe_ctlblk *kcb; + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; +@@ -763,16 +758,12 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + void *frame_pointer; + bool skipped = false; + +- preempt_disable(); +- + /* + * Set a dummy kprobe for avoiding kretprobe recursion. + * Since kretprobe never run in kprobe handler, kprobe must not + * be running at this point. + */ +- kcb = get_kprobe_ctlblk(); +- __this_cpu_write(current_kprobe, &kretprobe_kprobe); +- kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++ kprobe_busy_begin(); + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); +@@ -851,7 +842,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + __this_cpu_write(current_kprobe, &ri->rp->kp); + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); +- __this_cpu_write(current_kprobe, &kretprobe_kprobe); ++ __this_cpu_write(current_kprobe, &kprobe_busy); + } + + recycle_rp_inst(ri, &empty_rp); +@@ -867,8 +858,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + + kretprobe_hash_unlock(current, &flags); + +- __this_cpu_write(current_kprobe, NULL); +- preempt_enable(); ++ kprobe_busy_end(); + + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 7220ab210dcf..082d0cea72f4 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -263,6 +263,11 @@ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; + */ + static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; + ++/* ++ * The number of non-reserved physical address bits irrespective of features ++ * that repurpose legal bits, e.g. MKTME. ++ */ ++static u8 __read_mostly shadow_phys_bits; + + static void mmu_spte_set(u64 *sptep, u64 spte); + static void mmu_free_roots(struct kvm_vcpu *vcpu); +@@ -445,6 +450,21 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, + } + EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); + ++static u8 kvm_get_shadow_phys_bits(void) ++{ ++ /* ++ * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected ++ * in CPU detection code, but MKTME treats those reduced bits as ++ * 'keyID' thus they are not reserved bits. Therefore for MKTME ++ * we should still return physical address bits reported by CPUID. ++ */ ++ if (!boot_cpu_has(X86_FEATURE_TME) || ++ WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) ++ return boot_cpu_data.x86_phys_bits; ++ ++ return cpuid_eax(0x80000008) & 0xff; ++} ++ + static void kvm_mmu_reset_all_pte_masks(void) + { + u8 low_phys_bits; +@@ -458,6 +478,8 @@ static void kvm_mmu_reset_all_pte_masks(void) + shadow_present_mask = 0; + shadow_acc_track_mask = 0; + ++ shadow_phys_bits = kvm_get_shadow_phys_bits(); ++ + /* + * If the CPU has 46 or less physical address bits, then set an + * appropriate mask to guard against L1TF attacks. 
Otherwise, it is +@@ -4314,7 +4336,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) + */ + shadow_zero_check = &context->shadow_zero_check; + __reset_rsvds_bits_mask(vcpu, shadow_zero_check, +- boot_cpu_data.x86_phys_bits, ++ shadow_phys_bits, + context->shadow_root_level, uses_nx, + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), + is_pse(vcpu), true); +@@ -4351,13 +4373,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + + if (boot_cpu_is_amd()) + __reset_rsvds_bits_mask(vcpu, shadow_zero_check, +- boot_cpu_data.x86_phys_bits, ++ shadow_phys_bits, + context->shadow_root_level, false, + boot_cpu_has(X86_FEATURE_GBPAGES), + true, true); + else + __reset_rsvds_bits_mask_ept(shadow_zero_check, +- boot_cpu_data.x86_phys_bits, ++ shadow_phys_bits, + false); + + if (!shadow_me_mask) +@@ -4378,7 +4400,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) + { + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, +- boot_cpu_data.x86_phys_bits, execonly); ++ shadow_phys_bits, execonly); + } + + #define BYTE_MASK(access) \ +@@ -5675,6 +5697,25 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) + return 0; + } + ++static void kvm_set_mmio_spte_mask(void) ++{ ++ u64 mask; ++ ++ /* ++ * Set a reserved PA bit in MMIO SPTEs to generate page faults with ++ * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT ++ * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports ++ * 52-bit physical addresses then there are no reserved PA bits in the ++ * PTEs and so the reserved PA approach must be disabled. ++ */ ++ if (shadow_phys_bits < 52) ++ mask = BIT_ULL(51) | PT_PRESENT_MASK; ++ else ++ mask = 0; ++ ++ kvm_mmu_set_mmio_spte_mask(mask, mask); ++} ++ + int kvm_mmu_module_init(void) + { + if (nx_huge_pages == -1) +@@ -5682,6 +5723,8 @@ int kvm_mmu_module_init(void) + + kvm_mmu_reset_all_pte_masks(); + ++ kvm_set_mmio_spte_mask(); ++ + pte_list_desc_cache = kmem_cache_create("pte_list_desc", + sizeof(struct pte_list_desc), + 0, SLAB_ACCOUNT, NULL); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1477e23827c2..3401061a2231 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6291,35 +6291,6 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) + } + EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); + +-static void kvm_set_mmio_spte_mask(void) +-{ +- u64 mask; +- int maxphyaddr = boot_cpu_data.x86_phys_bits; +- +- /* +- * Set the reserved bits and the present bit of an paging-structure +- * entry to generate page fault with PFER.RSV = 1. +- */ +- +- /* +- * Mask the uppermost physical address bit, which would be reserved as +- * long as the supported physical address width is less than 52. +- */ +- mask = 1ull << 51; +- +- /* Set the present bit. */ +- mask |= 1ull; +- +- /* +- * If reserved bit is not supported, clear the present bit to disable +- * mmio page fault. 
+- */ +- if (maxphyaddr == 52) +- mask &= ~1ull; +- +- kvm_mmu_set_mmio_spte_mask(mask, mask); +-} +- + #ifdef CONFIG_X86_64 + static void pvclock_gtod_update_fn(struct work_struct *work) + { +@@ -6397,8 +6368,6 @@ int kvm_arch_init(void *opaque) + if (r) + goto out_free_percpu; + +- kvm_set_mmio_spte_mask(); +- + kvm_x86_ops = ops; + + kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, +diff --git a/crypto/algboss.c b/crypto/algboss.c +index 960d8548171b..9d253e1016b1 100644 +--- a/crypto/algboss.c ++++ b/crypto/algboss.c +@@ -194,8 +194,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) + if (IS_ERR(thread)) + goto err_put_larval; + +- wait_for_completion_interruptible(&larval->completion); +- + return NOTIFY_STOP; + + err_put_larval: +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index 90bc4e0f0785..1ce1284c9ed9 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -85,14 +85,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + return PTR_ERR(areq); + + /* convert iovecs of output buffers into RX SGL */ +- err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); ++ err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); + if (err) + goto free; + +- /* Process only as much RX buffers for which we have TX data */ +- if (len > ctx->used) +- len = ctx->used; +- + /* + * If more buffers are to be expected to be processed, process only + * full block size buffers. +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 33eb5e342a7a..a3a65f5490c0 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -57,7 +57,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -6536,7 +6535,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) + /* perform each probe asynchronously */ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; +- async_schedule(async_port_probe, ap); ++ ap->cookie = async_schedule(async_port_probe, ap); + } + + return 0; +@@ -6676,11 +6675,11 @@ void ata_host_detach(struct ata_host *host) + { + int i; + +- /* Ensure ata_port probe has completed */ +- async_synchronize_full(); +- +- for (i = 0; i < host->n_ports; i++) ++ for (i = 0; i < host->n_ports; i++) { ++ /* Ensure ata_port probe has completed */ ++ async_synchronize_cookie(host->ports[i]->cookie + 1); + ata_port_detach(host->ports[i]); ++ } + + /* the host is dead now, dissociate ACPI */ + ata_acpi_dissociate(host); +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index bcb6519fe211..0ee3cab88f70 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -702,6 +702,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, + /* temporary section violation during probe() */ + drv->probe = probe; + retval = code = __platform_driver_register(drv, module); ++ if (retval) ++ return retval; + + /* + * Fixup that section violation, being paranoid about code scanning +diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c +index 075662f2cf46..d20f66d57804 100644 +--- a/drivers/block/ps3disk.c ++++ b/drivers/block/ps3disk.c +@@ -468,7 +468,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) + blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); + + blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); +- blk_queue_segment_boundary(queue, -1UL); + blk_queue_dma_alignment(queue, dev->blk_size-1); + blk_queue_logical_block_size(queue, dev->blk_size); + 
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index 5f8082d89131..6db4204e5d5d 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -1483,13 +1483,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, + return &clock->hw; + } + +-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, ++static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, + const struct bcm2835_gate_data *data) + { +- return clk_register_gate(cprman->dev, data->name, data->parent, +- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, +- cprman->regs + data->ctl_reg, +- CM_GATE_BIT, 0, &cprman->regs_lock); ++ return clk_hw_register_gate(cprman->dev, data->name, data->parent, ++ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, ++ cprman->regs + data->ctl_reg, ++ CM_GATE_BIT, 0, &cprman->regs_lock); + } + + typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c +index 2057809219f4..7426d910e079 100644 +--- a/drivers/clk/qcom/gcc-msm8916.c ++++ b/drivers/clk/qcom/gcc-msm8916.c +@@ -270,7 +270,7 @@ static struct clk_pll gpll0 = { + .l_reg = 0x21004, + .m_reg = 0x21008, + .n_reg = 0x2100c, +- .config_reg = 0x21014, ++ .config_reg = 0x21010, + .mode_reg = 0x21000, + .status_reg = 0x2101c, + .status_bit = 17, +@@ -297,7 +297,7 @@ static struct clk_pll gpll1 = { + .l_reg = 0x20004, + .m_reg = 0x20008, + .n_reg = 0x2000c, +- .config_reg = 0x20014, ++ .config_reg = 0x20010, + .mode_reg = 0x20000, + .status_reg = 0x2001c, + .status_bit = 17, +@@ -324,7 +324,7 @@ static struct clk_pll gpll2 = { + .l_reg = 0x4a004, + .m_reg = 0x4a008, + .n_reg = 0x4a00c, +- .config_reg = 0x4a014, ++ .config_reg = 0x4a010, + .mode_reg = 0x4a000, + .status_reg = 0x4a01c, + .status_bit = 17, +@@ -351,7 +351,7 @@ static struct clk_pll bimc_pll = { + .l_reg = 0x23004, + .m_reg = 0x23008, + .n_reg = 0x2300c, +- .config_reg = 0x23014, ++ .config_reg = 0x23010, + .mode_reg = 0x23000, + .status_reg = 0x2301c, + .status_bit = 17, +diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c +index 1d2265f9ee97..1c327d5de98c 100644 +--- a/drivers/clk/samsung/clk-exynos5433.c ++++ b/drivers/clk/samsung/clk-exynos5433.c +@@ -1674,7 +1674,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { + GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", + ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", +- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), ++ ENABLE_SCLK_PERIC, 6, ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), + GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, + 5, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, +diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c +index 918ba3164da9..cd856210db58 100644 +--- a/drivers/clk/st/clk-flexgen.c ++++ b/drivers/clk/st/clk-flexgen.c +@@ -373,6 +373,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) + break; + } + ++ flex_flags &= ~CLK_IS_CRITICAL; + of_clk_detect_critical(np, i, &flex_flags); + + /* +diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c +index aa4add580516..0b5e091742f9 100644 +--- a/drivers/clk/sunxi/clk-sunxi.c ++++ b/drivers/clk/sunxi/clk-sunxi.c +@@ -98,7 +98,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) + * Round down the frequency to 
the closest multiple of either + * 6 or 16 + */ +- u32 round_freq_6 = round_down(freq_mhz, 6); ++ u32 round_freq_6 = rounddown(freq_mhz, 6); + u32 round_freq_16 = round_down(freq_mhz, 16); + + if (round_freq_6 > round_freq_16) +diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c +index beea89463ca2..4ea5c08a1eb6 100644 +--- a/drivers/clk/ti/composite.c ++++ b/drivers/clk/ti/composite.c +@@ -240,6 +240,7 @@ cleanup: + if (!cclk->comp_clks[i]) + continue; + list_del(&cclk->comp_clks[i]->link); ++ kfree(cclk->comp_clks[i]->parent_names); + kfree(cclk->comp_clks[i]); + } + +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c +index c1f8da958c78..4e38b87c3228 100644 +--- a/drivers/crypto/omap-sham.c ++++ b/drivers/crypto/omap-sham.c +@@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx { + }; + + struct omap_sham_ctx { +- struct omap_sham_dev *dd; +- + unsigned long flags; + + /* fallback stuff */ +@@ -916,27 +914,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) + return 0; + } + ++struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) ++{ ++ struct omap_sham_dev *dd; ++ ++ if (ctx->dd) ++ return ctx->dd; ++ ++ spin_lock_bh(&sham.lock); ++ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); ++ list_move_tail(&dd->list, &sham.dev_list); ++ ctx->dd = dd; ++ spin_unlock_bh(&sham.lock); ++ ++ return dd; ++} ++ + static int omap_sham_init(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = NULL, *tmp; ++ struct omap_sham_dev *dd; + int bs = 0; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); ++ ctx->dd = NULL; + +- ctx->dd = dd; ++ dd = omap_sham_find_dev(ctx); ++ if (!dd) ++ return -ENODEV; + + ctx->flags = 0; + +@@ -1186,8 +1192,7 @@ err1: + static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); +- struct omap_sham_dev *dd = tctx->dd; ++ struct omap_sham_dev *dd = ctx->dd; + + ctx->op = op; + +@@ -1197,7 +1202,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + static int omap_sham_update(struct ahash_request *req) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = ctx->dd; ++ struct omap_sham_dev *dd = omap_sham_find_dev(ctx); + + if (!req->nbytes) + return 0; +@@ -1302,21 +1307,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs = crypto_shash_blocksize(bctx->shash); + int ds = crypto_shash_digestsize(bctx->shash); +- struct omap_sham_dev *dd = NULL, *tmp; + int err, i; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); +- + err = crypto_shash_setkey(tctx->fallback, key, keylen); + if (err) + return err; +@@ -1334,7 +1326,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + + memset(bctx->ipad + keylen, 0, bs - keylen); + +- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { ++ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { 
+ memcpy(bctx->opad, bctx->ipad, bs); + + for (i = 0; i < bs; i++) { +@@ -2073,6 +2065,7 @@ static int omap_sham_probe(struct platform_device *pdev) + } + + dd->flags |= dd->pdata->flags; ++ sham.flags |= dd->pdata->flags; + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); +@@ -2098,6 +2091,9 @@ static int omap_sham_probe(struct platform_device *pdev) + spin_unlock(&sham.lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { ++ if (dd->pdata->algs_info[i].registered) ++ break; ++ + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + struct ahash_alg *alg; + +@@ -2143,9 +2139,11 @@ static int omap_sham_remove(struct platform_device *pdev) + list_del(&dd->list); + spin_unlock(&sham.lock); + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) +- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) ++ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { + crypto_unregister_ahash( + &dd->pdata->algs_info[i].algs_list[j]); ++ dd->pdata->algs_info[i].registered--; ++ } + tasklet_kill(&dd->done_task); + pm_runtime_disable(&pdev->dev); + +diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c +index 6f6537ab0a79..59e6ca685be8 100644 +--- a/drivers/extcon/extcon-adc-jack.c ++++ b/drivers/extcon/extcon-adc-jack.c +@@ -128,7 +128,7 @@ static int adc_jack_probe(struct platform_device *pdev) + for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); + data->num_conditions = i; + +- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); ++ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); + if (IS_ERR(data->chan)) + return PTR_ERR(data->chan); + +@@ -170,7 +170,6 @@ static int adc_jack_remove(struct platform_device *pdev) + + free_irq(data->irq, data); + cancel_work_sync(&data->handler.work); +- iio_channel_release(data->chan); + + return 0; + } +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index 42eaeae86bb8..e52381c9d04e 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -2751,6 +2752,17 @@ fail: + return ret; + } + ++static int do_get_act_status(struct drm_dp_aux *aux) ++{ ++ int ret; ++ u8 status; ++ ++ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); ++ if (ret < 0) ++ return ret; ++ ++ return status; ++} + + /** + * drm_dp_check_act_status() - Check ACT handled status. +@@ -2760,33 +2772,29 @@ fail: + */ + int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) + { +- u8 status; +- int ret; +- int count = 0; +- +- do { +- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); +- +- if (ret < 0) { +- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); +- goto fail; +- } +- +- if (status & DP_PAYLOAD_ACT_HANDLED) +- break; +- count++; +- udelay(100); +- +- } while (count < 30); +- +- if (!(status & DP_PAYLOAD_ACT_HANDLED)) { +- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); +- ret = -EINVAL; +- goto fail; ++ /* ++ * There doesn't seem to be any recommended retry count or timeout in ++ * the MST specification. Since some hubs have been observed to take ++ * over 1 second to update their payload allocations under certain ++ * conditions, we use a rather large timeout value. 
++ */ ++ const int timeout_ms = 3000; ++ int ret, status; ++ ++ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, ++ status & DP_PAYLOAD_ACT_HANDLED || status < 0, ++ 200, timeout_ms * USEC_PER_MSEC); ++ if (ret < 0 && status >= 0) { ++ DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", ++ timeout_ms, status); ++ return -EINVAL; ++ } else if (status < 0) { ++ DRM_DEBUG_KMS("Failed to read payload table status: %d\n", ++ status); ++ return status; + } ++ + return 0; +-fail: +- return ret; + } + EXPORT_SYMBOL(drm_dp_check_act_status); + +diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c +index cf804389f5ec..d50a7884e69e 100644 +--- a/drivers/gpu/drm/drm_encoder_slave.c ++++ b/drivers/gpu/drm/drm_encoder_slave.c +@@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + err = encoder_drv->encoder_init(client, dev, encoder); + if (err) +- goto fail_unregister; ++ goto fail_module_put; + + if (info->platform_data) + encoder->slave_funcs->set_config(&encoder->base, +@@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + return 0; + ++fail_module_put: ++ module_put(module); + fail_unregister: + i2c_unregister_device(client); +- module_put(module); + fail: + return err; + } +diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c +index e4b9eb1f6b60..f6b81f3256cf 100644 +--- a/drivers/gpu/drm/i915/i915_cmd_parser.c ++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c +@@ -570,6 +570,9 @@ struct drm_i915_reg_descriptor { + #define REG32(_reg, ...) \ + { .addr = (_reg), __VA_ARGS__ } + ++#define REG32_IDX(_reg, idx) \ ++ { .addr = _reg(idx) } ++ + /* + * Convenience macro for adding 64-bit registers. + * +@@ -667,6 +670,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), ++ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), +diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +index f7c0698fec40..791a74b9907d 100644 +--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c ++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +@@ -972,7 +972,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) + + return 0; + fail: +- mdp5_destroy(pdev); ++ if (mdp5_kms) ++ mdp5_destroy(pdev); + return ret; + } + +diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c +index c5716a0ca3b8..20ca0a75e685 100644 +--- a/drivers/gpu/drm/qxl/qxl_kms.c ++++ b/drivers/gpu/drm/qxl/qxl_kms.c +@@ -181,7 +181,7 @@ int qxl_device_init(struct qxl_device *qdev, + &(qdev->ram_header->cursor_ring_hdr), + sizeof(struct qxl_command), + QXL_CURSOR_RING_SIZE, +- qdev->io_base + QXL_IO_NOTIFY_CMD, ++ qdev->io_base + QXL_IO_NOTIFY_CURSOR, + false, + &qdev->cursor_event); + +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h +index a1f8cba251a2..3d9148eb40a7 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h +@@ -143,7 +143,7 @@ + #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 + + #define SUN4I_HDMI_DDC_CLK_REG 0x528 +-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) ++#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3) + #define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) + + #define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540 +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c 
b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +index 4692e8c345ed..58d9557a774f 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +@@ -32,7 +32,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, + unsigned long best_rate = 0; + u8 best_m = 0, best_n = 0, _m, _n; + +- for (_m = 0; _m < 8; _m++) { ++ for (_m = 0; _m < 16; _m++) { + for (_n = 0; _n < 8; _n++) { + unsigned long tmp_rate; + +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c +index 4b81dc231b18..5345b731bb7c 100644 +--- a/drivers/i2c/busses/i2c-piix4.c ++++ b/drivers/i2c/busses/i2c-piix4.c +@@ -960,7 +960,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + } + + if (dev->vendor == PCI_VENDOR_ID_AMD && +- dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { ++ (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || ++ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { + retval = piix4_setup_sb800(dev, id, 1); + } + +diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c +index 600d264e080c..e300f9530f19 100644 +--- a/drivers/i2c/busses/i2c-pxa.c ++++ b/drivers/i2c/busses/i2c-pxa.c +@@ -315,11 +315,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) + dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", + readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), + readl(_ISR(i2c))); +- dev_dbg(dev, "log: "); ++ dev_err(dev, "log:"); + for (i = 0; i < i2c->irqlogidx; i++) +- pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); +- +- pr_debug("\n"); ++ pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); ++ pr_cont("\n"); + } + + #else /* ifdef DEBUG */ +@@ -709,11 +708,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) + { + u32 icr; + +- /* +- * Clear the STOP and ACK flags +- */ ++ /* Clear the START, STOP, ACK, TB and MA flags */ + icr = readl(_ICR(i2c)); +- icr &= ~(ICR_STOP | ICR_ACKNAK); ++ icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); + writel(icr, _ICR(i2c)); + } + +diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c +index 5f625ffa2a88..ae415b4e381a 100644 +--- a/drivers/iio/pressure/bmp280-core.c ++++ b/drivers/iio/pressure/bmp280-core.c +@@ -182,6 +182,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + + (s32)2097152) * H2 + 8192) >> 14); + var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4; + ++ var = clamp_val(var, 0, 419430400); ++ + return var >> 12; + }; + +@@ -651,7 +653,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) + unsigned int ctrl; + + if (data->use_eoc) +- init_completion(&data->done); ++ reinit_completion(&data->done); + + ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); + if (ret) +@@ -907,6 +909,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, + "trying to enforce it\n"); + irq_trig = IRQF_TRIGGER_RISING; + } ++ ++ init_completion(&data->done); ++ + ret = devm_request_threaded_irq(dev, + irq, + bmp085_eoc_irq, +diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c +index 54076a3e8007..ac47e8a1dfbf 100644 +--- a/drivers/infiniband/core/cma_configfs.c ++++ b/drivers/infiniband/core/cma_configfs.c +@@ -319,8 +319,21 @@ fail: + return ERR_PTR(err); + } + ++static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) ++{ ++ struct config_group *group = ++ container_of(item, struct config_group, cg_item); ++ struct cma_dev_group 
*cma_dev_group = ++ container_of(group, struct cma_dev_group, device_group); ++ ++ configfs_remove_default_groups(&cma_dev_group->ports_group); ++ configfs_remove_default_groups(&cma_dev_group->device_group); ++ config_item_put(item); ++} ++ + static struct configfs_group_operations cma_subsys_group_ops = { + .make_group = make_cma_dev, ++ .drop_item = drop_cma_dev, + }; + + static struct config_item_type cma_subsys_type = { +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index 96a6583e7b52..66c764491a83 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -1374,7 +1374,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + if (__set_blocks(n1, n1->keys + n2->keys, + block_bytes(b->c)) > + btree_blocks(new_nodes[i])) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + keys = n2->keys; + /* Take the key of the node we're getting rid of */ +@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + + if (__bch_keylist_realloc(&keylist, + bkey_u64s(&new_nodes[i]->key))) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + bch_btree_node_write(new_nodes[i], &cl); + bch_keylist_add(&keylist, &new_nodes[i]->key); +@@ -1449,6 +1449,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + /* Invalidated our iterator */ + return -EINTR; + ++out_unlock_nocoalesce: ++ for (i = 0; i < nodes; i++) ++ mutex_unlock(&new_nodes[i]->write_lock); ++ + out_nocoalesce: + closure_sync(&cl); + bch_keylist_free(&keylist); +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c +index 8b7328666eaa..7c60aace8d25 100644 +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -1815,7 +1815,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, + int r; + + current_pgpath = READ_ONCE(m->current_pgpath); +- if (!current_pgpath) ++ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) + current_pgpath = choose_pgpath(m, 0); + + if (current_pgpath) { +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c +index 4d658a0c6025..c6d3a4bc811c 100644 +--- a/drivers/md/dm-zoned-metadata.c ++++ b/drivers/md/dm-zoned-metadata.c +@@ -1580,7 +1580,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) + return dzone; + } + +- return ERR_PTR(-EBUSY); ++ return NULL; + } + + /* +@@ -1600,7 +1600,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) + return zone; + } + +- return ERR_PTR(-EBUSY); ++ return NULL; + } + + /* +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c +index 2fad512dce98..1015b200330b 100644 +--- a/drivers/md/dm-zoned-reclaim.c ++++ b/drivers/md/dm-zoned-reclaim.c +@@ -350,8 +350,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) + + /* Get a data zone */ + dzone = dmz_get_zone_for_reclaim(zmd); +- if (IS_ERR(dzone)) +- return PTR_ERR(dzone); ++ if (!dzone) ++ return -EBUSY; + + start = jiffies; + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 948344531baf..702a7d2c7e1e 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -1168,6 +1168,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) + mddev->new_layout = mddev->layout; + mddev->new_chunk_sectors = mddev->chunk_sectors; + } ++ if (mddev->level == 0) ++ mddev->layout = -1; + + if (sb->state & (1<recovery_cp = MaxSector; +@@ -1584,6 +1586,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ + rdev->ppl.sector = rdev->sb_start + 
rdev->ppl.offset; + } + ++ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && ++ sb->level != 0) ++ return -EINVAL; ++ + if (!refdev) { + ret = 1; + } else { +@@ -1694,6 +1700,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) + mddev->new_chunk_sectors = mddev->chunk_sectors; + } + ++ if (mddev->level == 0 && ++ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) ++ mddev->layout = -1; ++ + if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) + set_bit(MD_HAS_JOURNAL, &mddev->flags); + +@@ -6757,6 +6767,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) + mddev->external = 0; + + mddev->layout = info->layout; ++ if (mddev->level == 0) ++ /* Cannot trust RAID0 layout info here */ ++ mddev->layout = -1; + mddev->chunk_sectors = info->chunk_size >> 9; + + if (mddev->persistent) { +diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c +index cdafa5e0ea6d..e179c121c030 100644 +--- a/drivers/md/raid0.c ++++ b/drivers/md/raid0.c +@@ -152,6 +152,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) + + if (conf->nr_strip_zones == 1) { + conf->layout = RAID0_ORIG_LAYOUT; ++ } else if (mddev->layout == RAID0_ORIG_LAYOUT || ++ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { ++ conf->layout = mddev->layout; + } else if (default_layout == RAID0_ORIG_LAYOUT || + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = default_layout; +diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c +index 953d0790ffd5..3259fb82d3c4 100644 +--- a/drivers/mfd/wm8994-core.c ++++ b/drivers/mfd/wm8994-core.c +@@ -696,3 +696,4 @@ module_i2c_driver(wm8994_i2c_driver); + MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Mark Brown "); ++MODULE_SOFTDEP("pre: wm8994_regulator"); +diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c +index dcec9cf4983f..e22a9ffa9cb7 100644 +--- a/drivers/mtd/nand/ams-delta.c ++++ b/drivers/mtd/nand/ams-delta.c +@@ -263,7 +263,7 @@ static int ams_delta_cleanup(struct platform_device *pdev) + void __iomem *io_base = platform_get_drvdata(pdev); + + /* Release resources, unregister device */ +- nand_release(ams_delta_mtd); ++ nand_release(mtd_to_nand(ams_delta_mtd)); + + gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); + gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); +diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c +index 9d4a28fa6b73..99c738be2545 100644 +--- a/drivers/mtd/nand/au1550nd.c ++++ b/drivers/mtd/nand/au1550nd.c +@@ -496,7 +496,7 @@ static int au1550nd_remove(struct platform_device *pdev) + struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); + struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +- nand_release(nand_to_mtd(&ctx->chip)); ++ nand_release(&ctx->chip); + iounmap(ctx->base); + release_mem_region(r->start, 0x1000); + kfree(ctx); +diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c +index fb31429b70a9..d79694160845 100644 +--- a/drivers/mtd/nand/bcm47xxnflash/main.c ++++ b/drivers/mtd/nand/bcm47xxnflash/main.c +@@ -65,7 +65,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev) + { + struct bcm47xxnflash *nflash = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&nflash->nand_chip)); ++ nand_release(&nflash->nand_chip); + + return 0; + } +diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c +index 5655dca6ce43..ebcbcbaa8b54 100644 +--- 
a/drivers/mtd/nand/bf5xx_nand.c ++++ b/drivers/mtd/nand/bf5xx_nand.c +@@ -688,7 +688,7 @@ static int bf5xx_nand_remove(struct platform_device *pdev) + * and their partitions, then go through freeing the + * resources used + */ +- nand_release(nand_to_mtd(&info->chip)); ++ nand_release(&info->chip); + + peripheral_free_list(bfin_nfc_pin_req); + bf5xx_nand_dma_remove(info); +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c +index f8d793b15a7a..2bb8e6faa539 100644 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c +@@ -2597,7 +2597,7 @@ int brcmnand_remove(struct platform_device *pdev) + struct brcmnand_host *host; + + list_for_each_entry(host, &ctrl->host_list, node) +- nand_release(nand_to_mtd(&host->chip)); ++ nand_release(&host->chip); + + clk_disable_unprepare(ctrl->clk); + +diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c +index bc558c438a57..98c013094fa2 100644 +--- a/drivers/mtd/nand/cafe_nand.c ++++ b/drivers/mtd/nand/cafe_nand.c +@@ -826,7 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev) + /* Disable NAND IRQ in global IRQ mask register */ + cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); + free_irq(pdev->irq, mtd); +- nand_release(mtd); ++ nand_release(chip); + free_rs(cafe->rs); + pci_iounmap(pdev, cafe->mmio); + dma_free_coherent(&cafe->pdev->dev, +diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c +index 1fc435f994e1..7b824ae88ab0 100644 +--- a/drivers/mtd/nand/cmx270_nand.c ++++ b/drivers/mtd/nand/cmx270_nand.c +@@ -230,7 +230,7 @@ module_init(cmx270_init); + static void __exit cmx270_cleanup(void) + { + /* Release resources, unregister device */ +- nand_release(cmx270_nand_mtd); ++ nand_release(mtd_to_nand(cmx270_nand_mtd)); + + gpio_free(GPIO_NAND_RB); + gpio_free(GPIO_NAND_CS); +diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c +index d48877540f14..647d4ee69345 100644 +--- a/drivers/mtd/nand/cs553x_nand.c ++++ b/drivers/mtd/nand/cs553x_nand.c +@@ -338,7 +338,7 @@ static void __exit cs553x_cleanup(void) + mmio_base = this->IO_ADDR_R; + + /* Release resources, unregister device */ +- nand_release(mtd); ++ nand_release(this); + kfree(mtd->name); + cs553x_mtd[i] = NULL; + +diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c +index ccc8c43abcff..e66f1385b49e 100644 +--- a/drivers/mtd/nand/davinci_nand.c ++++ b/drivers/mtd/nand/davinci_nand.c +@@ -854,7 +854,7 @@ static int nand_davinci_remove(struct platform_device *pdev) + ecc4_busy = false; + spin_unlock_irq(&davinci_nand_lock); + +- nand_release(nand_to_mtd(&info->chip)); ++ nand_release(&info->chip); + + clk_disable_unprepare(info->clk); + +diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c +index 3087b0ba7b7f..390a18ad68ee 100644 +--- a/drivers/mtd/nand/denali.c ++++ b/drivers/mtd/nand/denali.c +@@ -1444,9 +1444,7 @@ EXPORT_SYMBOL(denali_init); + /* driver exit point */ + void denali_remove(struct denali_nand_info *denali) + { +- struct mtd_info *mtd = nand_to_mtd(&denali->nand); +- +- nand_release(mtd); ++ nand_release(&denali->nand); + kfree(denali->buf); + denali_disable_irq(denali); + } +diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c +index c3aa53caab5c..4f48a9b4f9e3 100644 +--- a/drivers/mtd/nand/diskonchip.c ++++ b/drivers/mtd/nand/diskonchip.c +@@ -1605,13 +1605,10 @@ static int __init doc_probe(unsigned long physadr) + numchips = doc2001_init(mtd); + + if 
((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) { +- /* DBB note: i believe nand_release is necessary here, as ++ /* DBB note: i believe nand_cleanup is necessary here, as + buffers may have been allocated in nand_base. Check with + Thomas. FIX ME! */ +- /* nand_release will call mtd_device_unregister, but we +- haven't yet added it. This is handled without incident by +- mtd_device_unregister, as far as I can tell. */ +- nand_release(mtd); ++ nand_cleanup(nand); + kfree(nand); + goto fail; + } +@@ -1644,7 +1641,7 @@ static void release_nanddoc(void) + doc = nand_get_controller_data(nand); + + nextmtd = doc->nextdoc; +- nand_release(mtd); ++ nand_release(nand); + iounmap(doc->virtadr); + release_mem_region(doc->physadr, DOC_IOREMAP_LEN); + kfree(nand); +diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c +index 2436cbc71662..53fdf491d8c0 100644 +--- a/drivers/mtd/nand/docg4.c ++++ b/drivers/mtd/nand/docg4.c +@@ -1376,7 +1376,7 @@ static int __init probe_docg4(struct platform_device *pdev) + return 0; + + fail: +- nand_release(mtd); /* deletes partitions and mtd devices */ ++ nand_release(nand); /* deletes partitions and mtd devices */ + free_bch(doc->bch); + kfree(nand); + +@@ -1389,7 +1389,7 @@ fail_unmap: + static int __exit cleanup_docg4(struct platform_device *pdev) + { + struct docg4_priv *doc = platform_get_drvdata(pdev); +- nand_release(doc->mtd); ++ nand_release(mtd_to_nand(doc->mtd)); + free_bch(doc->bch); + kfree(mtd_to_nand(doc->mtd)); + iounmap(doc->virtadr); +diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c +index 17db2f90aa2c..0f70bd961234 100644 +--- a/drivers/mtd/nand/fsl_elbc_nand.c ++++ b/drivers/mtd/nand/fsl_elbc_nand.c +@@ -813,7 +813,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) + struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; + struct mtd_info *mtd = nand_to_mtd(&priv->chip); + +- nand_release(mtd); ++ nand_release(&priv->chip); + + kfree(mtd->name); + +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 16deba1a2385..0d4928161439 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -927,7 +927,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) + { + struct mtd_info *mtd = nand_to_mtd(&priv->chip); + +- nand_release(mtd); ++ nand_release(&priv->chip); + + kfree(mtd->name); + +diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c +index a88e2cf66e0f..009e96fb92ae 100644 +--- a/drivers/mtd/nand/fsl_upm.c ++++ b/drivers/mtd/nand/fsl_upm.c +@@ -326,7 +326,7 @@ static int fun_remove(struct platform_device *ofdev) + struct mtd_info *mtd = nand_to_mtd(&fun->chip); + int i; + +- nand_release(mtd); ++ nand_release(&fun->chip); + kfree(mtd->name); + + for (i = 0; i < fun->mchip_count; i++) { +diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c +index eac15d9bf49e..3be80e15e400 100644 +--- a/drivers/mtd/nand/fsmc_nand.c ++++ b/drivers/mtd/nand/fsmc_nand.c +@@ -1118,7 +1118,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) + struct fsmc_nand_data *host = platform_get_drvdata(pdev); + + if (host) { +- nand_release(nand_to_mtd(&host->nand)); ++ nand_release(&host->nand); + + if (host->mode == USE_DMA_ACCESS) { + dma_release_channel(host->write_dma_chan); +diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c +index fd3648952b5a..81b02b81e984 100644 +--- a/drivers/mtd/nand/gpio.c ++++ b/drivers/mtd/nand/gpio.c +@@ -199,7 +199,7 @@ static int 
gpio_nand_remove(struct platform_device *pdev) + { + struct gpiomtd *gpiomtd = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&gpiomtd->nand_chip)); ++ nand_release(&gpiomtd->nand_chip); + + if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) + gpio_set_value(gpiomtd->plat.gpio_nwp, 0); +diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +index d4d824ef64e9..c7d0d2eed6c2 100644 +--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c ++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +@@ -2135,7 +2135,7 @@ static int gpmi_nand_remove(struct platform_device *pdev) + { + struct gpmi_nand_data *this = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&this->nand)); ++ nand_release(&this->nand); + gpmi_free_dma_buffer(this); + release_resources(this); + return 0; +diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c +index d9ee1a7e6956..1d1b541489f8 100644 +--- a/drivers/mtd/nand/hisi504_nand.c ++++ b/drivers/mtd/nand/hisi504_nand.c +@@ -823,7 +823,7 @@ static int hisi_nfc_probe(struct platform_device *pdev) + return 0; + + err_mtd: +- nand_release(mtd); ++ nand_release(chip); + err_res: + return ret; + } +@@ -831,9 +831,8 @@ err_res: + static int hisi_nfc_remove(struct platform_device *pdev) + { + struct hinfc_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = nand_to_mtd(&host->chip); + +- nand_release(mtd); ++ nand_release(&host->chip); + + return 0; + } +diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c +index ad827d4af3e9..62ddea88edce 100644 +--- a/drivers/mtd/nand/jz4740_nand.c ++++ b/drivers/mtd/nand/jz4740_nand.c +@@ -480,7 +480,7 @@ static int jz_nand_probe(struct platform_device *pdev) + return 0; + + err_nand_release: +- nand_release(mtd); ++ nand_release(chip); + err_unclaim_banks: + while (chipnr--) { + unsigned char bank = nand->banks[chipnr]; +@@ -500,7 +500,7 @@ static int jz_nand_remove(struct platform_device *pdev) + struct jz_nand *nand = platform_get_drvdata(pdev); + size_t i; + +- nand_release(nand_to_mtd(&nand->chip)); ++ nand_release(&nand->chip); + + /* Deassert and disable all chips */ + writel(0, nand->base + JZ_REG_NAND_CTRL); +diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c +index e69f6ae4c539..86ff46eb7925 100644 +--- a/drivers/mtd/nand/jz4780_nand.c ++++ b/drivers/mtd/nand/jz4780_nand.c +@@ -293,7 +293,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev, + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { +- nand_release(mtd); ++ nand_release(chip); + return ret; + } + +@@ -308,7 +308,7 @@ static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc) + + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list); +- nand_release(nand_to_mtd(&chip->chip)); ++ nand_release(&chip->chip); + list_del(&chip->chip_list); + } + } +diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c +index 5796468db653..d19d07931c2b 100644 +--- a/drivers/mtd/nand/lpc32xx_mlc.c ++++ b/drivers/mtd/nand/lpc32xx_mlc.c +@@ -805,7 +805,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_release(nand_chip); + + err_exit4: + free_irq(host->irq, host); +@@ -829,9 +829,8 @@ err_exit1: + static int lpc32xx_nand_remove(struct platform_device *pdev) + { + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- 
nand_release(mtd); ++ nand_release(&host->nand_chip); + free_irq(host->irq, host); + if (use_dma) + dma_release_channel(host->dma_chan); +diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c +index b61f28a1554d..1be4aaa3d6e0 100644 +--- a/drivers/mtd/nand/lpc32xx_slc.c ++++ b/drivers/mtd/nand/lpc32xx_slc.c +@@ -935,7 +935,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_release(chip); + + err_exit3: + dma_release_channel(host->dma_chan); +@@ -954,9 +954,8 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) + { + uint32_t tmp; + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + dma_release_channel(host->dma_chan); + + /* Force CE high */ +diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c +index b6b97cc9fba6..b8a93b47a290 100644 +--- a/drivers/mtd/nand/mpc5121_nfc.c ++++ b/drivers/mtd/nand/mpc5121_nfc.c +@@ -829,7 +829,7 @@ static int mpc5121_nfc_remove(struct platform_device *op) + struct device *dev = &op->dev; + struct mtd_info *mtd = dev_get_drvdata(dev); + +- nand_release(mtd); ++ nand_release(mtd_to_nand(mtd)); + mpc5121_nfc_free(dev, mtd); + + return 0; +diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c +index e17f838b9b81..ff314ce104e5 100644 +--- a/drivers/mtd/nand/mtk_nand.c ++++ b/drivers/mtd/nand/mtk_nand.c +@@ -1357,7 +1357,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, + ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); + if (ret) { + dev_err(dev, "mtd parse partition error\n"); +- nand_release(mtd); ++ nand_cleanup(nand); + return ret; + } + +@@ -1514,7 +1514,7 @@ static int mtk_nfc_remove(struct platform_device *pdev) + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, + node); +- nand_release(nand_to_mtd(&chip->nand)); ++ nand_release(&chip->nand); + list_del(&chip->node); + } + +diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c +index fcb575d55b89..808d85bde9f2 100644 +--- a/drivers/mtd/nand/mxc_nand.c ++++ b/drivers/mtd/nand/mxc_nand.c +@@ -1834,7 +1834,7 @@ static int mxcnd_remove(struct platform_device *pdev) + { + struct mxc_nand_host *host = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&host->nand)); ++ nand_release(&host->nand); + if (host->clk_act) + clk_disable_unprepare(host->clk); + +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c +index d410de331854..e953eca67608 100644 +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -5046,12 +5046,12 @@ EXPORT_SYMBOL_GPL(nand_cleanup); + /** + * nand_release - [NAND Interface] Unregister the MTD device and free resources + * held by the NAND device +- * @mtd: MTD device structure ++ * @chip: NAND chip object + */ +-void nand_release(struct mtd_info *mtd) ++void nand_release(struct nand_chip *chip) + { +- mtd_device_unregister(mtd); +- nand_cleanup(mtd_to_nand(mtd)); ++ mtd_device_unregister(nand_to_mtd(chip)); ++ nand_cleanup(chip); + } + EXPORT_SYMBOL_GPL(nand_release); + +diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c +index 44322a363ba5..dbb0e47f5197 100644 +--- a/drivers/mtd/nand/nandsim.c ++++ b/drivers/mtd/nand/nandsim.c +@@ -2356,7 +2356,7 @@ static int __init ns_init_module(void) + + err_exit: + free_nandsim(nand); +- nand_release(nsmtd); ++ 
nand_release(chip); + for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) + kfree(nand->partitions[i].name); + error: +@@ -2378,7 +2378,7 @@ static void __exit ns_cleanup_module(void) + int i; + + free_nandsim(ns); /* Free nandsim private resources */ +- nand_release(nsmtd); /* Unregister driver */ ++ nand_release(chip); /* Unregister driver */ + for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + kfree(mtd_to_nand(nsmtd)); /* Free other structures */ +diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c +index d8a806894937..eb84328d9bde 100644 +--- a/drivers/mtd/nand/ndfc.c ++++ b/drivers/mtd/nand/ndfc.c +@@ -258,7 +258,7 @@ static int ndfc_remove(struct platform_device *ofdev) + struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); + struct mtd_info *mtd = nand_to_mtd(&ndfc->chip); + +- nand_release(mtd); ++ nand_release(&ndfc->chip); + kfree(mtd->name); + + return 0; +diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c +index 7bb4d2ea9342..a79f88c6d010 100644 +--- a/drivers/mtd/nand/nuc900_nand.c ++++ b/drivers/mtd/nand/nuc900_nand.c +@@ -284,7 +284,7 @@ static int nuc900_nand_remove(struct platform_device *pdev) + { + struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&nuc900_nand->chip)); ++ nand_release(&nuc900_nand->chip); + clk_disable(nuc900_nand->clk); + + return 0; +diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c +index 9f98f74ff221..110c0726c665 100644 +--- a/drivers/mtd/nand/omap2.c ++++ b/drivers/mtd/nand/omap2.c +@@ -2306,7 +2306,7 @@ static int omap_nand_remove(struct platform_device *pdev) + } + if (info->dma) + dma_release_channel(info->dma); +- nand_release(mtd); ++ nand_release(nand_chip); + return 0; + } + +diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c +index 5a5aa1f07d07..df07f9b4cf03 100644 +--- a/drivers/mtd/nand/orion_nand.c ++++ b/drivers/mtd/nand/orion_nand.c +@@ -186,7 +186,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) + mtd->name = "orion_nand"; + ret = mtd_device_register(mtd, board->parts, board->nr_parts); + if (ret) { +- nand_release(mtd); ++ nand_cleanup(nc); + goto no_dev; + } + +@@ -201,9 +201,8 @@ static int orion_nand_remove(struct platform_device *pdev) + { + struct orion_nand_info *info = platform_get_drvdata(pdev); + struct nand_chip *chip = &info->chip; +- struct mtd_info *mtd = nand_to_mtd(chip); + +- nand_release(mtd); ++ nand_release(chip); + + clk_disable_unprepare(info->clk); + +diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c +index d649d5944826..350d4226b436 100644 +--- a/drivers/mtd/nand/oxnas_nand.c ++++ b/drivers/mtd/nand/oxnas_nand.c +@@ -123,7 +123,7 @@ static int oxnas_nand_probe(struct platform_device *pdev) + GFP_KERNEL); + if (!chip) { + err = -ENOMEM; +- goto err_clk_unprepare; ++ goto err_release_child; + } + + chip->controller = &oxnas->base; +@@ -144,13 +144,11 @@ static int oxnas_nand_probe(struct platform_device *pdev) + /* Scan to find existence of the device */ + err = nand_scan(mtd, 1); + if (err) +- goto err_clk_unprepare; ++ goto err_release_child; + + err = mtd_device_register(mtd, NULL, 0); +- if (err) { +- nand_release(mtd); +- goto err_clk_unprepare; +- } ++ if (err) ++ goto err_cleanup_nand; + + oxnas->chips[nchips] = chip; + ++nchips; +@@ -166,6 +164,10 @@ static int oxnas_nand_probe(struct platform_device *pdev) + + return 0; + ++err_cleanup_nand: ++ nand_cleanup(chip); ++err_release_child: ++ 
of_node_put(nand_np); + err_clk_unprepare: + clk_disable_unprepare(oxnas->clk); + return err; +@@ -176,7 +178,7 @@ static int oxnas_nand_remove(struct platform_device *pdev) + struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + + if (oxnas->chips[0]) +- nand_release(nand_to_mtd(oxnas->chips[0])); ++ nand_release(oxnas->chips[0]); + + clk_disable_unprepare(oxnas->clk); + +diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c +index d69e5bae541e..f0f4ff960965 100644 +--- a/drivers/mtd/nand/pasemi_nand.c ++++ b/drivers/mtd/nand/pasemi_nand.c +@@ -193,7 +193,7 @@ static int pasemi_nand_remove(struct platform_device *ofdev) + chip = mtd_to_nand(pasemi_nand_mtd); + + /* Release resources, unregister device */ +- nand_release(pasemi_nand_mtd); ++ nand_release(chip); + + release_region(lpcctl, 4); + +diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c +index 925a1323604d..8c2d1c5c9569 100644 +--- a/drivers/mtd/nand/plat_nand.c ++++ b/drivers/mtd/nand/plat_nand.c +@@ -99,7 +99,7 @@ static int plat_nand_probe(struct platform_device *pdev) + if (!err) + return err; + +- nand_release(mtd); ++ nand_cleanup(&data->chip); + out: + if (pdata->ctrl.remove) + pdata->ctrl.remove(pdev); +@@ -114,7 +114,7 @@ static int plat_nand_remove(struct platform_device *pdev) + struct plat_nand_data *data = platform_get_drvdata(pdev); + struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); + +- nand_release(nand_to_mtd(&data->chip)); ++ nand_release(&data->chip); + if (pdata->ctrl.remove) + pdata->ctrl.remove(pdev); + +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c +index 125b744c9c28..df62f99979f3 100644 +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -1915,7 +1915,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) + clk_disable_unprepare(info->clk); + + for (cs = 0; cs < pdata->num_cs; cs++) +- nand_release(nand_to_mtd(&info->host[cs]->chip)); ++ nand_release(&info->host[cs]->chip); + return 0; + } + +diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c +index 09d5f7df6023..65d1be2c3049 100644 +--- a/drivers/mtd/nand/qcom_nandc.c ++++ b/drivers/mtd/nand/qcom_nandc.c +@@ -2760,7 +2760,7 @@ static int qcom_nandc_remove(struct platform_device *pdev) + struct qcom_nand_host *host; + + list_for_each_entry(host, &nandc->host_list, node) +- nand_release(nand_to_mtd(&host->chip)); ++ nand_release(&host->chip); + + qcom_nandc_unalloc(nandc); + +diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c +index fc9287af4614..2cfa54941395 100644 +--- a/drivers/mtd/nand/r852.c ++++ b/drivers/mtd/nand/r852.c +@@ -656,7 +656,7 @@ static int r852_register_nand_device(struct r852_device *dev) + dev->card_registred = 1; + return 0; + error3: +- nand_release(mtd); ++ nand_release(dev->chip); + error1: + /* Force card redetect */ + dev->card_detected = 0; +@@ -675,7 +675,7 @@ static void r852_unregister_nand_device(struct r852_device *dev) + return; + + device_remove_file(&mtd->dev, &dev_attr_media_type); +- nand_release(mtd); ++ nand_release(dev->chip); + r852_engine_disable(dev); + dev->card_registred = 0; + } +diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c +index 4c383eeec6f6..f60de68bfabc 100644 +--- a/drivers/mtd/nand/s3c2410.c ++++ b/drivers/mtd/nand/s3c2410.c +@@ -784,7 +784,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) + + for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { + pr_debug("releasing mtd %d (%p)\n", mtdno, 
ptr); +- nand_release(nand_to_mtd(&ptr->chip)); ++ nand_release(&ptr->chip); + } + } + +diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c +index 43db80e5d994..f2ed03ee3035 100644 +--- a/drivers/mtd/nand/sh_flctl.c ++++ b/drivers/mtd/nand/sh_flctl.c +@@ -1231,7 +1231,7 @@ static int flctl_remove(struct platform_device *pdev) + struct sh_flctl *flctl = platform_get_drvdata(pdev); + + flctl_release_dma(flctl); +- nand_release(nand_to_mtd(&flctl->chip)); ++ nand_release(&flctl->chip); + pm_runtime_disable(&pdev->dev); + + return 0; +diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c +index f59c455d9f51..c245d66609c1 100644 +--- a/drivers/mtd/nand/sharpsl.c ++++ b/drivers/mtd/nand/sharpsl.c +@@ -192,7 +192,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) + return 0; + + err_add: +- nand_release(mtd); ++ nand_cleanup(this); + + err_scan: + iounmap(sharpsl->io); +@@ -210,7 +210,7 @@ static int sharpsl_nand_remove(struct platform_device *pdev) + struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); + + /* Release resources, unregister device */ +- nand_release(nand_to_mtd(&sharpsl->chip)); ++ nand_release(&sharpsl->chip); + + iounmap(sharpsl->io); + +diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c +index 575997d0ef8a..8d4f0cd7197d 100644 +--- a/drivers/mtd/nand/socrates_nand.c ++++ b/drivers/mtd/nand/socrates_nand.c +@@ -195,7 +195,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_cleanup(nand_chip); + + out: + iounmap(host->io_base); +@@ -208,9 +208,8 @@ out: + static int socrates_nand_remove(struct platform_device *ofdev) + { + struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); +- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + + iounmap(host->io_base); + +diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c +index 8e5231482397..d6e31e8a7b66 100644 +--- a/drivers/mtd/nand/sunxi_nand.c ++++ b/drivers/mtd/nand/sunxi_nand.c +@@ -2125,7 +2125,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); +- nand_release(mtd); ++ nand_release(nand); + return ret; + } + +@@ -2164,7 +2164,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, + node); +- nand_release(nand_to_mtd(&chip->nand)); ++ nand_release(&chip->nand); + sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); + } +diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c +index ce366816a7ef..1ab16a90ea29 100644 +--- a/drivers/mtd/nand/tango_nand.c ++++ b/drivers/mtd/nand/tango_nand.c +@@ -619,7 +619,7 @@ static int tango_nand_remove(struct platform_device *pdev) + + for (cs = 0; cs < MAX_CS; ++cs) { + if (nfc->chips[cs]) +- nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip)); ++ nand_release(&nfc->chips[cs]->nand_chip); + } + + return 0; +diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c +index 84dbf32332e1..51f12b9f90ba 100644 +--- a/drivers/mtd/nand/tmio_nand.c ++++ b/drivers/mtd/nand/tmio_nand.c +@@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) + if (!retval) + return retval; + +- nand_release(mtd); ++ nand_cleanup(nand_chip); + + err_irq: + 
tmio_hw_stop(dev, tmio); +@@ -459,7 +459,7 @@ static int tmio_remove(struct platform_device *dev) + { + struct tmio_nand *tmio = platform_get_drvdata(dev); + +- nand_release(nand_to_mtd(&tmio->chip)); ++ nand_release(&tmio->chip); + tmio_hw_stop(dev, tmio); + return 0; + } +diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c +index b567d212fe7d..236181b2985a 100644 +--- a/drivers/mtd/nand/txx9ndfmc.c ++++ b/drivers/mtd/nand/txx9ndfmc.c +@@ -390,7 +390,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) + chip = mtd_to_nand(mtd); + txx9_priv = nand_get_controller_data(chip); + +- nand_release(mtd); ++ nand_release(chip); + kfree(txx9_priv->mtdname); + kfree(txx9_priv); + } +diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c +index e2583a539b41..688393526b5a 100644 +--- a/drivers/mtd/nand/vf610_nfc.c ++++ b/drivers/mtd/nand/vf610_nfc.c +@@ -794,7 +794,7 @@ static int vf610_nfc_remove(struct platform_device *pdev) + struct mtd_info *mtd = platform_get_drvdata(pdev); + struct vf610_nfc *nfc = mtd_to_nfc(mtd); + +- nand_release(mtd); ++ nand_release(mtd_to_nand(mtd)); + clk_disable_unprepare(nfc->clk); + return 0; + } +diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c +index 9926b4e3d69d..176a94fa31d7 100644 +--- a/drivers/mtd/nand/xway_nand.c ++++ b/drivers/mtd/nand/xway_nand.c +@@ -211,7 +211,7 @@ static int xway_nand_probe(struct platform_device *pdev) + + err = mtd_device_register(mtd, NULL, 0); + if (err) +- nand_release(mtd); ++ nand_cleanup(&data->chip); + + return err; + } +@@ -223,7 +223,7 @@ static int xway_nand_remove(struct platform_device *pdev) + { + struct xway_nand_data *data = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&data->chip)); ++ nand_release(&data->chip); + + return 0; + } +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index 5269af303f55..fff55f0bed30 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -6328,11 +6328,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- u32 ctrl, ctrl_ext, rctl, status; +- /* Runtime suspend should only enable wakeup for link changes */ +- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; ++ u32 ctrl, ctrl_ext, rctl, status, wufc; + int retval = 0; + ++ /* Runtime suspend should only enable wakeup for link changes */ ++ if (runtime) ++ wufc = E1000_WUFC_LNKC; ++ else if (device_may_wakeup(&pdev->dev)) ++ wufc = adapter->wol; ++ else ++ wufc = 0; ++ + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; +@@ -6389,7 +6395,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if (hw->mac.type >= e1000_pch_lpt) { +- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) ++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. 
+ */ +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c +index 6d3fa36b1616..3c9f8770f7e7 100644 +--- a/drivers/net/geneve.c ++++ b/drivers/net/geneve.c +@@ -915,9 +915,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) + if (geneve->collect_md) { + info = skb_tunnel_info(skb); + if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { +- err = -EINVAL; + netdev_dbg(dev, "no tunnel metadata\n"); +- goto tx_error; ++ dev_kfree_skb(skb); ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; + } + } else { + info = &geneve->info; +@@ -934,7 +935,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) + + if (likely(!err)) + return NETDEV_TX_OK; +-tx_error: ++ + dev_kfree_skb(skb); + + if (err == -ELOOP) +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c +index 16a6e1193912..b74c735a423d 100644 +--- a/drivers/net/hamradio/yam.c ++++ b/drivers/net/hamradio/yam.c +@@ -1162,6 +1162,7 @@ static int __init yam_init_driver(void) + err = register_netdev(dev); + if (err) { + printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); ++ free_netdev(dev); + goto error; + } + yam_devs[i] = dev; +diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c +index 03b80d89b980..b75ec229b39a 100644 +--- a/drivers/ntb/ntb.c ++++ b/drivers/ntb/ntb.c +@@ -216,10 +216,8 @@ int ntb_default_port_number(struct ntb_dev *ntb) + case NTB_TOPO_B2B_DSD: + return NTB_PORT_SEC_DSD; + default: +- break; ++ return 0; + } +- +- return -EINVAL; + } + EXPORT_SYMBOL(ntb_default_port_number); + +@@ -242,10 +240,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) + case NTB_TOPO_B2B_DSD: + return NTB_PORT_PRI_USD; + default: +- break; ++ return 0; + } +- +- return -EINVAL; + } + EXPORT_SYMBOL(ntb_default_peer_port_number); + +diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c +index 5f3048e75bec..c1db09fbbe04 100644 +--- a/drivers/pci/host/pci-aardvark.c ++++ b/drivers/pci/host/pci-aardvark.c +@@ -365,10 +365,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) + + advk_pcie_wait_for_link(pcie); + +- reg = PCIE_CORE_LINK_L0S_ENTRY | +- (1 << PCIE_CORE_LINK_WIDTH_SHIFT); +- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); +- + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c +index 2b0a1f3b8265..0077afca2493 100644 +--- a/drivers/pci/host/pcie-rcar.c ++++ b/drivers/pci/host/pcie-rcar.c +@@ -328,11 +328,12 @@ static struct pci_ops rcar_pcie_ops = { + }; + + static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, +- struct resource *res) ++ struct resource_entry *window) + { + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + resource_size_t res_start; ++ struct resource *res = window->res; + u32 mask; + + rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); +@@ -346,9 +347,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, + rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + if (res->flags & IORESOURCE_IO) +- res_start = pci_pio_to_address(res->start); ++ res_start = pci_pio_to_address(res->start) - window->offset; + else +- res_start = res->start; ++ res_start = res->start - window->offset; + + rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); + rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, +@@ -377,7 +378,7 @@ static int rcar_pcie_setup(struct 
list_head *resource, struct rcar_pcie *pci) + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: +- rcar_pcie_setup_window(i, pci, res); ++ rcar_pcie_setup_window(i, pci, win); + i++; + break; + case IORESOURCE_BUS: +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 400031622b76..04d5c62588b7 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -584,16 +584,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) + + /* Setup initial capable state. Will be updated later */ + link->aspm_capable = link->aspm_support; +- /* +- * If the downstream component has pci bridge function, don't +- * do ASPM for now. +- */ +- list_for_each_entry(child, &linkbus->devices, bus_list) { +- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { +- link->aspm_disable = ASPM_STATE_ALL; +- break; +- } +- } + + /* Get and check endpoint acceptable latencies */ + list_for_each_entry(child, &linkbus->devices, bus_list) { +diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c +index 3008bba360f3..ec6f6213960b 100644 +--- a/drivers/pci/pcie/ptm.c ++++ b/drivers/pci/pcie/ptm.c +@@ -47,10 +47,6 @@ void pci_ptm_init(struct pci_dev *dev) + if (!pci_is_pcie(dev)) + return; + +- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); +- if (!pos) +- return; +- + /* + * Enable PTM only on interior devices (root ports, switch ports, + * etc.) on the assumption that it causes no link traffic until an +@@ -60,6 +56,23 @@ void pci_ptm_init(struct pci_dev *dev) + pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) + return; + ++ /* ++ * Switch Downstream Ports are not permitted to have a PTM ++ * capability; their PTM behavior is controlled by the Upstream ++ * Port (PCIe r5.0, sec 7.9.16). ++ */ ++ ups = pci_upstream_bridge(dev); ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && ++ ups && ups->ptm_enabled) { ++ dev->ptm_granularity = ups->ptm_granularity; ++ dev->ptm_enabled = 1; ++ return; ++ } ++ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); ++ if (!pos) ++ return; ++ + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); + local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; + +@@ -69,7 +82,6 @@ void pci_ptm_init(struct pci_dev *dev) + * the spec recommendation (PCIe r3.1, sec 7.32.3), select the + * furthest upstream Time Source as the PTM Root. 
+ */ +- ups = pci_upstream_bridge(dev); + if (ups && ups->ptm_enabled) { + ctrl = PCI_PTM_CTRL_ENABLE; + if (ups->ptm_granularity == 0) +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 31cc04aeaaaa..55ece07e584a 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -792,9 +792,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) + goto free; + + err = device_register(&bridge->dev); +- if (err) ++ if (err) { + put_device(&bridge->dev); +- ++ goto free; ++ } + bus->bridge = get_device(&bridge->dev); + device_enable_async_suspend(bus->bridge); + pci_set_bus_of_node(bus); +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c +index 17f2c5a505b2..ec0119e1e781 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c +@@ -661,16 +661,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, + return 0; + } + +-/* +- * imx_free_resources() - free memory used by this driver +- * @info: info driver instance +- */ +-static void imx_free_resources(struct imx_pinctrl *ipctl) +-{ +- if (ipctl->pctl) +- pinctrl_unregister(ipctl->pctl); +-} +- + int imx_pinctrl_probe(struct platform_device *pdev, + struct imx_pinctrl_soc_info *info) + { +@@ -761,21 +751,16 @@ int imx_pinctrl_probe(struct platform_device *pdev, + &ipctl->pctl); + if (ret) { + dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); +- goto free; ++ return ret; + } + + ret = imx_pinctrl_probe_dt(pdev, ipctl); + if (ret) { + dev_err(&pdev->dev, "fail to probe dt properties\n"); +- goto free; ++ return ret; + } + + dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); + + return pinctrl_enable(ipctl->pctl); +- +-free: +- imx_free_resources(ipctl); +- +- return ret; + } +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index e2cca91fd266..68108c4c3969 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -642,7 +642,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret) { +- pinctrl_unregister(ipctl->pctl); + dev_err(&pdev->dev, "Failed to populate subdevices\n"); + return ret; + } +diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig +index 5ab90c1f3f7c..24163cf8612c 100644 +--- a/drivers/power/supply/Kconfig ++++ b/drivers/power/supply/Kconfig +@@ -530,7 +530,7 @@ config CHARGER_BQ24257 + tristate "TI BQ24250/24251/24257 battery charger driver" + depends on I2C + depends on GPIOLIB || COMPILE_TEST +- depends on REGMAP_I2C ++ select REGMAP_I2C + help + Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery + chargers. +diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c +index 0f3432795f3c..b8f7dac7ac3f 100644 +--- a/drivers/power/supply/lp8788-charger.c ++++ b/drivers/power/supply/lp8788-charger.c +@@ -600,27 +600,14 @@ static void lp8788_setup_adc_channel(struct device *dev, + return; + + /* ADC channel for battery voltage */ +- chan = iio_channel_get(dev, pdata->adc_vbatt); ++ chan = devm_iio_channel_get(dev, pdata->adc_vbatt); + pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan; + + /* ADC channel for battery temperature */ +- chan = iio_channel_get(dev, pdata->adc_batt_temp); ++ chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); + pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? 
NULL : chan; + } + +-static void lp8788_release_adc_channel(struct lp8788_charger *pchg) +-{ +- int i; +- +- for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { +- if (!pchg->chan[i]) +- continue; +- +- iio_channel_release(pchg->chan[i]); +- pchg->chan[i] = NULL; +- } +-} +- + static ssize_t lp8788_show_charger_status(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -747,7 +734,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) + lp8788_irq_unregister(pdev, pchg); + sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group); + lp8788_psy_unregister(pchg); +- lp8788_release_adc_channel(pchg); + + return 0; + } +diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c +index 072c5189bd6d..0655dbdc7000 100644 +--- a/drivers/power/supply/smb347-charger.c ++++ b/drivers/power/supply/smb347-charger.c +@@ -1141,6 +1141,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) + switch (reg) { + case IRQSTAT_A: + case IRQSTAT_C: ++ case IRQSTAT_D: + case IRQSTAT_E: + case IRQSTAT_F: + case STAT_A: +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index 8f4fa1a52f05..d6372470e5be 100644 +--- a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -1432,6 +1432,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, + rproc->dev.type = &rproc_type; + rproc->dev.class = &rproc_class; + rproc->dev.driver_data = rproc; ++ idr_init(&rproc->notifyids); + + /* Assign a unique device index and name */ + rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); +@@ -1450,8 +1451,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, + + mutex_init(&rproc->lock); + +- idr_init(&rproc->notifyids); +- + INIT_LIST_HEAD(&rproc->carveouts); + INIT_LIST_HEAD(&rproc->mappings); + INIT_LIST_HEAD(&rproc->traces); +diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h +index 29d6b5222f1c..0f8d13288611 100644 +--- a/drivers/s390/cio/qdio.h ++++ b/drivers/s390/cio/qdio.h +@@ -377,7 +377,6 @@ static inline int multicast_outbound(struct qdio_q *q) + extern u64 last_ai_time; + + /* prototypes for thin interrupt */ +-void qdio_setup_thinint(struct qdio_irq *irq_ptr); + int qdio_establish_thinint(struct qdio_irq *irq_ptr); + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); + void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c +index d0090c5c88e7..a64615a10352 100644 +--- a/drivers/s390/cio/qdio_setup.c ++++ b/drivers/s390/cio/qdio_setup.c +@@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); +- qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, +diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c +index 831a3a0a2837..4dc1108069d4 100644 +--- a/drivers/s390/cio/qdio_thinint.c ++++ b/drivers/s390/cio/qdio_thinint.c +@@ -270,17 +270,19 @@ int __init tiqdio_register_thinints(void) + + int qdio_establish_thinint(struct qdio_irq *irq_ptr) + { ++ int rc; ++ + if (!is_thinint_irq(irq_ptr)) + return 0; +- return set_subchannel_ind(irq_ptr, 0); +-} + +-void qdio_setup_thinint(struct qdio_irq *irq_ptr) +-{ +- if (!is_thinint_irq(irq_ptr)) +- return; + irq_ptr->dsci = get_indicator(); + DBF_HEX(&irq_ptr->dsci, sizeof(void *)); ++ ++ rc = 
set_subchannel_ind(irq_ptr, 0); ++ if (rc) ++ put_indicator(irq_ptr->dsci); ++ ++ return rc; + } + + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c +index 421fe869a11e..ef9d907f2df5 100644 +--- a/drivers/scsi/arm/acornscsi.c ++++ b/drivers/scsi/arm/acornscsi.c +@@ -2914,8 +2914,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) + + ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); +- if (!ashost->base || !ashost->fast) ++ if (!ashost->base || !ashost->fast) { ++ ret = -ENOMEM; + goto out_put; ++ } + + host->irq = ec->irq; + ashost->host = host; +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index 83645a1c6f82..aff868afe68d 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -429,6 +429,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, + int rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + ++ set_adapter_info(hostdata); ++ + /* Re-enable the CRQ */ + do { + if (rc) +diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c +index d453667612f8..15d64f96e623 100644 +--- a/drivers/scsi/iscsi_boot_sysfs.c ++++ b/drivers/scsi/iscsi_boot_sysfs.c +@@ -360,7 +360,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, + boot_kobj->kobj.kset = boot_kset->kset; + if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, + NULL, name, index)) { +- kfree(boot_kobj); ++ kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->data = data; +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 4c84c2ae1112..db1111f7e85a 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -7913,6 +7913,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + spin_lock_irq(shost->host_lock); + if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { + spin_unlock_irq(shost->host_lock); ++ if (newnode) ++ lpfc_nlp_put(ndlp); + goto dropit; + } + spin_unlock_irq(shost->host_lock); +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 817a7963a038..556971c5f0b0 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -3263,7 +3263,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) + ioc->scsi_lookup = NULL; + } + kfree(ioc->hpr_lookup); ++ ioc->hpr_lookup = NULL; + kfree(ioc->internal_lookup); ++ ioc->internal_lookup = NULL; + if (ioc->chain_lookup) { + for (i = 0; i < ioc->chain_depth; i++) { + if (ioc->chain_lookup[i].chain_buffer) +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c +index 94f3829b1974..fb6439bc1d9a 100644 +--- a/drivers/scsi/qedi/qedi_iscsi.c ++++ b/drivers/scsi/qedi/qedi_iscsi.c +@@ -1007,7 +1007,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + goto ep_exit_recover; + +- flush_work(&qedi_ep->offload_work); ++ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) ++ flush_work(&qedi_ep->offload_work); + + if (qedi_ep->conn) { + qedi_conn = qedi_ep->conn; +@@ -1224,6 +1225,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) + } + + iscsi_cid = (u32)path_data->handle; ++ if (iscsi_cid >= qedi->max_active_conns) { ++ ret = -EINVAL; ++ goto set_path_exit; ++ } + qedi_ep = qedi->ep_tbl[iscsi_cid]; + QEDI_INFO(&qedi->dbg_ctx, 
QEDI_LOG_INFO, + "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index d4024015f859..ea60c6e603c0 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -5824,6 +5824,7 @@ qla2x00_do_dpc(void *data) + + if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags))) { ++ base_vha->flags.online = 1; + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); + if (ha->isp_ops->abort_isp(base_vha)) { +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index e08ac431bc49..e7aee067b056 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -937,6 +937,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +@@ -1101,6 +1102,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index d0389b20574d..5be3d6b7991b 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -748,7 +748,7 @@ static int sr_probe(struct device *dev) + cd->cdi.disk = disk; + + if (register_cdrom(&cd->cdi)) +- goto fail_put; ++ goto fail_minor; + + /* + * Initialize block layer runtime PM stuffs before the +@@ -766,6 +766,10 @@ static int sr_probe(struct device *dev) + + return 0; + ++fail_minor: ++ spin_lock(&sr_index_lock); ++ clear_bit(minor, sr_index_bits); ++ spin_unlock(&sr_index_lock); + fail_put: + put_disk(disk); + fail_free: +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c +index f2b8de195d8a..ee3589ac64ab 100644 +--- a/drivers/scsi/ufs/ufs-qcom.c ++++ b/drivers/scsi/ufs/ufs-qcom.c +@@ -1649,11 +1649,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) + + /* sleep a bit intermittently as we are dumping too much data */ + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); +- usleep_range(1000, 1100); ++ udelay(1000); + ufs_qcom_testbus_read(hba); +- usleep_range(1000, 1100); ++ udelay(1000); + ufs_qcom_print_unipro_testbus(hba); +- usleep_range(1000, 1100); ++ udelay(1000); + } + + /** +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 3a98e5a1d830..1e2a97a10033 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -4890,7 +4890,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, + err = ufshcd_enable_auto_bkops(hba); + else + err = ufshcd_disable_auto_bkops(hba); +- hba->urgent_bkops_lvl = curr_status; + out: + return err; + } +diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c +index 4e7575147775..9fab0e2751aa 100644 +--- a/drivers/staging/greybus/light.c ++++ b/drivers/staging/greybus/light.c +@@ -1033,7 +1033,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id) + + light->channels_count = conf.channel_count; + light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); +- ++ if (!light->name) ++ return -ENOMEM; + light->channels = kcalloc(light->channels_count, + sizeof(struct gb_channel), GFP_KERNEL); + if (!light->channels) +diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c +index 67207b0554cd..5d6f3686c0de 100644 +--- 
a/drivers/staging/sm750fb/sm750.c ++++ b/drivers/staging/sm750fb/sm750.c +@@ -899,6 +899,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) + fix->visual = FB_VISUAL_PSEUDOCOLOR; + break; + case 16: ++ case 24: + case 32: + fix->visual = FB_VISUAL_TRUECOLOR; + break; +diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +index c211a8e4a210..fa98c398d70f 100644 +--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c ++++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +@@ -183,7 +183,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (!data || IS_ERR(data)) ++ if (!IS_ERR_OR_NULL(data)) + data = ti_thermal_build_data(bgp, id); + + if (!data) +@@ -210,7 +210,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (data && data->ti_thermal) { ++ if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { + if (data->our_zone) + thermal_zone_device_unregister(data->ti_thermal); + } +@@ -276,7 +276,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) + + data = ti_bandgap_get_sensor_data(bgp, id); + +- if (data) { ++ if (!IS_ERR_OR_NULL(data)) { + cpufreq_cooling_unregister(data->cool_dev); + cpufreq_cpu_put(data->policy); + } +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index d52221ae1b85..663cbe3669e1 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -88,6 +88,8 @@ static LIST_HEAD(hvc_structs); + */ + static DEFINE_SPINLOCK(hvc_structs_lock); + ++/* Mutex to serialize hvc_open */ ++static DEFINE_MUTEX(hvc_open_mutex); + /* + * This value is used to assign a tty->index value to a hvc_struct based + * upon order of exposure via hvc_probe(), when we can not match it to +@@ -332,16 +334,24 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty) + */ + static int hvc_open(struct tty_struct *tty, struct file * filp) + { +- struct hvc_struct *hp = tty->driver_data; ++ struct hvc_struct *hp; + unsigned long flags; + int rc = 0; + ++ mutex_lock(&hvc_open_mutex); ++ ++ hp = tty->driver_data; ++ if (!hp) { ++ rc = -EIO; ++ goto out; ++ } ++ + spin_lock_irqsave(&hp->port.lock, flags); + /* Check and then increment for fast path open. 
*/ + if (hp->port.count++ > 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + hvc_kick(); +- return 0; ++ goto out; + } /* else count == 0 */ + spin_unlock_irqrestore(&hp->port.lock, flags); + +@@ -369,6 +379,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + /* Force wakeup of the polling thread */ + hvc_kick(); + ++out: ++ mutex_unlock(&hvc_open_mutex); + return rc; + } + +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index f46bd1af7a10..c70e79a0e9f2 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -677,11 +677,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, + * FIXME: lock against link layer control transmissions + */ + +-static void gsm_data_kick(struct gsm_mux *gsm) ++static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) + { + struct gsm_msg *msg, *nmsg; + int len; +- int skip_sof = 0; + + list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { + if (gsm->constipated && msg->addr) +@@ -703,18 +702,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) + print_hex_dump_bytes("gsm_data_kick: ", + DUMP_PREFIX_OFFSET, + gsm->txframe, len); +- +- if (gsm->output(gsm, gsm->txframe + skip_sof, +- len - skip_sof) < 0) ++ if (gsm->output(gsm, gsm->txframe, len) < 0) + break; + /* FIXME: Can eliminate one SOF in many more cases */ + gsm->tx_bytes -= msg->len; +- /* For a burst of frames skip the extra SOF within the +- burst */ +- skip_sof = 1; + + list_del(&msg->list); + kfree(msg); ++ ++ if (dlci) { ++ tty_port_tty_wakeup(&dlci->port); ++ } else { ++ int i = 0; ++ ++ for (i = 0; i < NUM_DLCI; i++) ++ if (gsm->dlci[i]) ++ tty_port_tty_wakeup(&gsm->dlci[i]->port); ++ } + } + } + +@@ -766,7 +770,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) + /* Add to the actual output queue */ + list_add_tail(&msg->list, &gsm->tx_list); + gsm->tx_bytes += msg->len; +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, dlci); + } + + /** +@@ -1227,7 +1231,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, + gsm_control_reply(gsm, CMD_FCON, NULL, 0); + /* Kick the link in case it is idling */ + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + spin_unlock_irqrestore(&gsm->tx_lock, flags); + break; + case CMD_FCOFF: +@@ -2426,7 +2430,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) + /* Queue poll */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + if (gsm->tx_bytes < TX_THRESH_LO) { + gsm_dlci_data_sweep(gsm); + } +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index 637f72fb6427..e55b55633721 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2605,6 +2605,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + uap->port.fifosize = uap->fifosize; + uap->port.flags = UPF_BOOT_AUTOCONF; + uap->port.line = index; ++ spin_lock_init(&uap->port.lock); + + amba_ports[index] = uap; + +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 5e456a83779d..b0471ce34011 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -481,7 +481,8 @@ static int usblp_release(struct inode *inode, struct file *file) + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ +- usblp_cleanup(usblp); ++ usblp_cleanup(usblp); /* any URBs must be dead */ ++ + 
mutex_unlock(&usblp_mutex); + return 0; + } +@@ -1388,9 +1389,11 @@ static void usblp_disconnect(struct usb_interface *intf) + + usblp_unlink_urbs(usblp); + mutex_unlock(&usblp->mut); ++ usb_poison_anchored_urbs(&usblp->urbs); + + if (!usblp->used) + usblp_cleanup(usblp); ++ + mutex_unlock(&usblp_mutex); + } + +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c +index b8bcb007c92a..e3e0a3ab31da 100644 +--- a/drivers/usb/dwc2/core_intr.c ++++ b/drivers/usb/dwc2/core_intr.c +@@ -364,10 +364,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) + if (ret && (ret != -ENOTSUPP)) + dev_err(hsotg->dev, "exit hibernation failed\n"); + ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + call_gadget(hsotg, resume); ++ } else { ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + } +- /* Change to L0 state */ +- hsotg->lx_state = DWC2_L0; + } else { + if (hsotg->params.hibernation) + return; +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index c1f037af9702..7d2715e899bb 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -100,40 +100,43 @@ function_descriptors(struct usb_function *f, + } + + /** +- * next_ep_desc() - advance to the next EP descriptor ++ * next_desc() - advance to the next desc_type descriptor + * @t: currect pointer within descriptor array ++ * @desc_type: descriptor type + * +- * Return: next EP descriptor or NULL ++ * Return: next desc_type descriptor or NULL + * +- * Iterate over @t until either EP descriptor found or ++ * Iterate over @t until either desc_type descriptor found or + * NULL (that indicates end of list) encountered + */ + static struct usb_descriptor_header** +-next_ep_desc(struct usb_descriptor_header **t) ++next_desc(struct usb_descriptor_header **t, u8 desc_type) + { + for (; *t; t++) { +- if ((*t)->bDescriptorType == USB_DT_ENDPOINT) ++ if ((*t)->bDescriptorType == desc_type) + return t; + } + return NULL; + } + + /* +- * for_each_ep_desc()- iterate over endpoint descriptors in the +- * descriptors list +- * @start: pointer within descriptor array. +- * @ep_desc: endpoint descriptor to use as the loop cursor ++ * for_each_desc() - iterate over desc_type descriptors in the ++ * descriptors list ++ * @start: pointer within descriptor array. ++ * @iter_desc: desc_type descriptor to use as the loop cursor ++ * @desc_type: wanted descriptr type + */ +-#define for_each_ep_desc(start, ep_desc) \ +- for (ep_desc = next_ep_desc(start); \ +- ep_desc; ep_desc = next_ep_desc(ep_desc+1)) ++#define for_each_desc(start, iter_desc, desc_type) \ ++ for (iter_desc = next_desc(start, desc_type); \ ++ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type)) + + /** +- * config_ep_by_speed() - configures the given endpoint ++ * config_ep_by_speed_and_alt() - configures the given endpoint + * according to gadget speed. 
+ * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure ++ * @alt: alternate setting number + * + * Return: error code, 0 on success + * +@@ -146,11 +149,13 @@ next_ep_desc(struct usb_descriptor_header **t) + * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +-int config_ep_by_speed(struct usb_gadget *g, +- struct usb_function *f, +- struct usb_ep *_ep) ++int config_ep_by_speed_and_alt(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep, ++ u8 alt) + { + struct usb_endpoint_descriptor *chosen_desc = NULL; ++ struct usb_interface_descriptor *int_desc = NULL; + struct usb_descriptor_header **speed_desc = NULL; + + struct usb_ss_ep_comp_descriptor *comp_desc = NULL; +@@ -186,8 +191,21 @@ int config_ep_by_speed(struct usb_gadget *g, + default: + speed_desc = f->fs_descriptors; + } ++ ++ /* find correct alternate setting descriptor */ ++ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { ++ int_desc = (struct usb_interface_descriptor *)*d_spd; ++ ++ if (int_desc->bAlternateSetting == alt) { ++ speed_desc = d_spd; ++ goto intf_found; ++ } ++ } ++ return -EIO; ++ ++intf_found: + /* find descriptors */ +- for_each_ep_desc(speed_desc, d_spd) { ++ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { + chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; + if (chosen_desc->bEndpointAddress == _ep->address) + goto ep_found; +@@ -240,6 +258,32 @@ ep_found: + } + return 0; + } ++EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); ++ ++/** ++ * config_ep_by_speed() - configures the given endpoint ++ * according to gadget speed. ++ * @g: pointer to the gadget ++ * @f: usb function ++ * @_ep: the endpoint to configure ++ * ++ * Return: error code, 0 on success ++ * ++ * This function chooses the right descriptors for a given ++ * endpoint according to gadget speed and saves it in the ++ * endpoint desc field. If the endpoint already has a descriptor ++ * assigned to it - overwrites it with currently corresponding ++ * descriptor. The endpoint maxpacket field is updated according ++ * to the chosen descriptor. 
++ * Note: the supplied function should hold all the descriptors ++ * for supported speeds ++ */ ++int config_ep_by_speed(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep) ++{ ++ return config_ep_by_speed_and_alt(g, f, _ep, 0); ++} + EXPORT_SYMBOL_GPL(config_ep_by_speed); + + /** +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c +index ac2aa04ca657..710793161795 100644 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c +@@ -1615,17 +1615,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + u16 maxpacket; + u32 tmp; + unsigned long flags; + + /* Verify EP data */ + if ((!_ep) || (!ep) || (!desc) || +- (desc->bDescriptorType != USB_DT_ENDPOINT)) { +- dev_dbg(udc->dev, "bad ep or descriptor\n"); ++ (desc->bDescriptorType != USB_DT_ENDPOINT)) + return -EINVAL; +- } ++ ++ udc = ep->udc; + maxpacket = usb_endpoint_maxp(desc); + if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { + dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); +@@ -1873,7 +1873,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) + static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + unsigned long flags; + + if ((!ep) || (ep->hwep_num <= 1)) +@@ -1883,6 +1883,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + if (ep->is_in) + return -EAGAIN; + ++ udc = ep->udc; + spin_lock_irqsave(&udc->lock, flags); + + if (value == 1) { +diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c +index 46ce7bc15f2b..53abad98af6d 100644 +--- a/drivers/usb/gadget/udc/m66592-udc.c ++++ b/drivers/usb/gadget/udc/m66592-udc.c +@@ -1672,7 +1672,7 @@ static int m66592_probe(struct platform_device *pdev) + + err_add_udc: + m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); +- ++ m66592->ep0_req = NULL; + clean_up3: + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c +index 394abd5d65c0..cf12ca567e69 100644 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c +@@ -268,10 +268,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, + static void s3c2410_udc_nuke(struct s3c2410_udc *udc, + struct s3c2410_ep *ep, int status) + { +- /* Sanity check */ +- if (&ep->queue == NULL) +- return; +- + while (!list_empty(&ep->queue)) { + struct s3c2410_request *req; + req = list_entry(ep->queue.next, struct s3c2410_request, +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c +index c7a9b31eeaef..637079a35003 100644 +--- a/drivers/usb/host/ehci-mxc.c ++++ b/drivers/usb/host/ehci-mxc.c +@@ -63,6 +63,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; + + hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); + if (!hcd) +diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c +index f1908ea9fbd8..6fcd33288014 100644 +--- a/drivers/usb/host/ehci-platform.c ++++ b/drivers/usb/host/ehci-platform.c +@@ -390,6 +390,11 @@ static int ehci_platform_resume(struct device *dev) + } + + 
ehci_resume(hcd, priv->reset_on_resume); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ + return 0; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c +index 61fe2b985070..742cefa22c2b 100644 +--- a/drivers/usb/host/ohci-platform.c ++++ b/drivers/usb/host/ohci-platform.c +@@ -355,6 +355,11 @@ static int ohci_platform_resume(struct device *dev) + } + + ohci_resume(hcd, false); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ + return 0; + } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 2a73592908e1..7219cbf7c54c 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -381,7 +381,15 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) + if (ret) + return ret; + +- return xhci_resume(xhci, 0); ++ ret = xhci_resume(xhci, 0); ++ if (ret) ++ return ret; ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ ++ return 0; + } + + static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) +diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c +index 802df210929b..7e474e41c85e 100644 +--- a/drivers/vfio/mdev/mdev_sysfs.c ++++ b/drivers/vfio/mdev/mdev_sysfs.c +@@ -113,7 +113,7 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, + "%s-%s", dev_driver_string(parent->dev), + group->name); + if (ret) { +- kfree(type); ++ kobject_put(&type->kobj); + return ERR_PTR(ret); + } + +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index 423ea1f98441..36bc8f104e42 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -1464,7 +1464,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) + if (ret) + return ret; + +- if (cap <= PCI_CAP_ID_MAX) { ++ /* ++ * ID 0 is a NULL capability, conflicting with our fake ++ * PCI_CAP_ID_BASIC. As it has no content, consider it ++ * hidden for now. ++ */ ++ if (cap && cap <= PCI_CAP_ID_MAX) { + len = pci_cap_length[cap]; + if (len == 0xFF) { /* Variable length */ + len = vfio_cap_len(vdev, cap, pos); +@@ -1732,8 +1737,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) + vdev->vconfig = NULL; + kfree(vdev->pci_config_map); + vdev->pci_config_map = NULL; +- kfree(vdev->msi_perm); +- vdev->msi_perm = NULL; ++ if (vdev->msi_perm) { ++ free_perm_bits(vdev->msi_perm); ++ kfree(vdev->msi_perm); ++ vdev->msi_perm = NULL; ++ } + } + + /* +diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c +index 939f057836e1..4cdc7a3f6dc5 100644 +--- a/drivers/video/backlight/lp855x_bl.c ++++ b/drivers/video/backlight/lp855x_bl.c +@@ -460,7 +460,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = regulator_enable(lp->enable); + if (ret < 0) { + dev_err(lp->dev, "failed to enable vddio: %d\n", ret); +- return ret; ++ goto disable_supply; + } + + /* +@@ -475,24 +475,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = lp855x_configure(lp); + if (ret) { + dev_err(lp->dev, "device config err: %d", ret); +- return ret; ++ goto disable_vddio; + } + + ret = lp855x_backlight_register(lp); + if (ret) { + dev_err(lp->dev, + "failed to register backlight. 
err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); + if (ret) { + dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + backlight_update_status(lp->bl); ++ + return 0; ++ ++disable_vddio: ++ if (lp->enable) ++ regulator_disable(lp->enable); ++disable_supply: ++ if (lp->supply) ++ regulator_disable(lp->supply); ++ ++ return ret; + } + + static int lp855x_remove(struct i2c_client *cl) +@@ -501,6 +511,8 @@ static int lp855x_remove(struct i2c_client *cl) + + lp->bl->props.brightness = 0; + backlight_update_status(lp->bl); ++ if (lp->enable) ++ regulator_disable(lp->enable); + if (lp->supply) + regulator_disable(lp->supply); + sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); +diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c +index 79383ff62019..1443386bb590 100644 +--- a/drivers/watchdog/da9062_wdt.c ++++ b/drivers/watchdog/da9062_wdt.c +@@ -94,11 +94,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, + unsigned int regval) + { + struct da9062 *chip = wdt->hw; +- int ret; +- +- ret = da9062_reset_watchdog_timer(wdt); +- if (ret) +- return ret; + + return regmap_update_bits(chip->regmap, + DA9062AA_CONTROL_D, +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 61949e3446e5..77ce77a28324 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -1439,10 +1439,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + */ + if (!for_part) { + ret = devcgroup_inode_permission(bdev->bd_inode, perm); +- if (ret != 0) { +- bdput(bdev); ++ if (ret != 0) + return ret; +- } + } + + restart: +@@ -1515,8 +1513,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + goto out_clear; + BUG_ON(for_part); + ret = __blkdev_get(whole, mode, 1); +- if (ret) ++ if (ret) { ++ bdput(whole); + goto out_clear; ++ } + bdev->bd_contains = whole; + bdev->bd_part = disk_get_part(disk, partno); + if (!(disk->flags & GENHD_FL_UP) || +@@ -1570,7 +1570,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) + put_disk(disk); + module_put(owner); + out: +- bdput(bdev); + + return ret; + } +@@ -1656,6 +1655,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) + bdput(whole); + } + ++ if (res) ++ bdput(bdev); ++ + return res; + } + EXPORT_SYMBOL(blkdev_get); +diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h +index 748e8d59e611..cb287df13a7a 100644 +--- a/fs/dlm/dlm_internal.h ++++ b/fs/dlm/dlm_internal.h +@@ -99,7 +99,6 @@ do { \ + __LINE__, __FILE__, #x, jiffies); \ + {do} \ + printk("\n"); \ +- BUG(); \ + panic("DLM: Record message above and reboot.\n"); \ + } \ + } +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 4f9eb4b61549..fc6bc261f7ac 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -2917,7 +2917,7 @@ again: + * in use to avoid freeing it when removing blocks. 
+ */ + if (sbi->s_cluster_ratio > 1) { +- pblk = ext4_ext_pblock(ex) + end - ee_block + 2; ++ pblk = ext4_ext_pblock(ex) + end - ee_block + 1; + partial_cluster = + -(long long) EXT4_B2C(sbi, pblk); + } +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 2d021a33914a..89319c352406 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -918,7 +918,8 @@ static int f2fs_statfs_project(struct super_block *sb, + limit >>= sb->s_blocksize_bits; + + if (limit && buf->f_blocks > limit) { +- curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; ++ curblock = (dquot->dq_dqb.dqb_curspace + ++ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; + buf->f_blocks = limit; + buf->f_bfree = buf->f_bavail = + (buf->f_blocks > curblock) ? +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c +index a3208511f35a..f30418911e1b 100644 +--- a/fs/gfs2/log.c ++++ b/fs/gfs2/log.c +@@ -804,8 +804,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, + * @new: New transaction to be merged + */ + +-static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) ++static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new) + { ++ struct gfs2_trans *old = sdp->sd_log_tr; ++ + WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); + + old->tr_num_buf_new += new->tr_num_buf_new; +@@ -817,6 +819,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) + + list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); + list_splice_tail_init(&new->tr_buf, &old->tr_buf); ++ ++ spin_lock(&sdp->sd_ail_lock); ++ list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list); ++ list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list); ++ spin_unlock(&sdp->sd_ail_lock); + } + + static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) +@@ -828,7 +835,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) + gfs2_log_lock(sdp); + + if (sdp->sd_log_tr) { +- gfs2_merge_trans(sdp->sd_log_tr, tr); ++ gfs2_merge_trans(sdp, tr); + } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { + gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); + sdp->sd_log_tr = tr; +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index 057be88eb1b4..7ed0359ebac6 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -922,7 +922,7 @@ fail: + } + + static const match_table_t nolock_tokens = { +- { Opt_jid, "jid=%d\n", }, ++ { Opt_jid, "jid=%d", }, + { Opt_err, NULL }, + }; + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 4d45786738ab..a19bbcfab7c5 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -7309,7 +7309,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) + } + + static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { +- .rpc_call_done = &nfs4_bind_one_conn_to_session_done, ++ .rpc_call_done = nfs4_bind_one_conn_to_session_done, + }; + + /* +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c +index 80aeb19b176b..22b784e7ef50 100644 +--- a/fs/nfsd/nfs4callback.c ++++ b/fs/nfsd/nfs4callback.c +@@ -1161,6 +1161,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) + err = setup_callback_client(clp, &conn, ses); + if (err) { + nfsd4_mark_cb_down(clp, err); ++ if (c) ++ svc_xprt_put(c->cn_xprt); + return; + } + } +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index c51574fab0b0..00dcb1bad76b 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -50,7 +50,7 @@ static inline int 
get_bitmask_order(unsigned int count) + + static __always_inline unsigned long hweight_long(unsigned long w) + { +- return sizeof(w) == 4 ? hweight32(w) : hweight64(w); ++ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); + } + + /** +diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h +index f236f5b931b2..7fdd7f355b52 100644 +--- a/include/linux/elfnote.h ++++ b/include/linux/elfnote.h +@@ -54,7 +54,7 @@ + .popsection ; + + #define ELFNOTE(name, type, desc) \ +- ELFNOTE_START(name, type, "") \ ++ ELFNOTE_START(name, type, "a") \ + desc ; \ + ELFNOTE_END + +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 550fa358893a..37f30d62f3a2 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -693,9 +693,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) + static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) + { + #if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) ++ preempt_disable(); + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); ++ preempt_enable(); + #elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index 520702b82134..be7a49f437ea 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -385,6 +385,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) + return this_cpu_ptr(&kprobe_ctlblk); + } + ++extern struct kprobe kprobe_busy; ++void kprobe_busy_begin(void); ++void kprobe_busy_end(void); ++ + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); + int register_kprobe(struct kprobe *p); + void unregister_kprobe(struct kprobe *p); +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 93838d98e3f3..5c9a44e3a027 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + /* + * Define if arch has non-standard setup. 
This is a _PCI_ standard +@@ -884,6 +885,8 @@ struct ata_port { + struct timer_list fastdrain_timer; + unsigned long fastdrain_cnt; + ++ async_cookie_t cookie; ++ + int em_message_type; + void *private_data; + +diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h +index 2b05f4273bab..e9a791f46eb6 100644 +--- a/include/linux/mtd/rawnand.h ++++ b/include/linux/mtd/rawnand.h +@@ -22,6 +22,7 @@ + #include + #include + ++struct nand_chip; + struct mtd_info; + struct nand_flash_dev; + struct device_node; +@@ -37,7 +38,7 @@ int nand_scan_ident(struct mtd_info *mtd, int max_chips, + int nand_scan_tail(struct mtd_info *mtd); + + /* Unregister the MTD device and free resources held by the NAND device */ +-void nand_release(struct mtd_info *mtd); ++void nand_release(struct nand_chip *chip); + + /* Internal helper for board drivers which need to override command function */ + void nand_wait_ready(struct mtd_info *mtd); +@@ -227,9 +228,6 @@ enum nand_ecc_algo { + #define NAND_CI_CELLTYPE_MSK 0x0C + #define NAND_CI_CELLTYPE_SHIFT 2 + +-/* Keep gcc happy */ +-struct nand_chip; +- + /* ONFI features */ + #define ONFI_FEATURE_16_BIT_BUS (1 << 0) + #define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) +diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h +index 590d313b5f39..a865698361c1 100644 +--- a/include/linux/usb/composite.h ++++ b/include/linux/usb/composite.h +@@ -248,6 +248,9 @@ int usb_function_activate(struct usb_function *); + + int usb_interface_id(struct usb_configuration *, struct usb_function *); + ++int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, ++ struct usb_ep *_ep, u8 alt); ++ + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep); + +diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h +index b0d15c73f6d7..1f2d8c81f0e0 100644 +--- a/include/uapi/linux/raid/md_p.h ++++ b/include/uapi/linux/raid/md_p.h +@@ -329,6 +329,7 @@ struct mdp_superblock_1 { + #define MD_FEATURE_JOURNAL 512 /* support write cache */ + #define MD_FEATURE_PPL 1024 /* support PPL */ + #define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */ ++#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful for RAID0 */ + #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ + |MD_FEATURE_RECOVERY_OFFSET \ + |MD_FEATURE_RESHAPE_ACTIVE \ +@@ -341,6 +342,7 @@ struct mdp_superblock_1 { + |MD_FEATURE_JOURNAL \ + |MD_FEATURE_PPL \ + |MD_FEATURE_MULTIPLE_PPLS \ ++ |MD_FEATURE_RAID0_LAYOUT \ + ) + + struct r5l_payload_header { +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 66f1818d4762..f2d2194b51ca 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -599,11 +599,12 @@ static void kprobe_optimizer(struct work_struct *work) + mutex_unlock(&module_mutex); + mutex_unlock(&text_mutex); + cpus_read_unlock(); +- mutex_unlock(&kprobe_mutex); + + /* Step 5: Kick optimizer again if needed */ + if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) + kick_kprobe_optimizer(); ++ ++ mutex_unlock(&kprobe_mutex); + } + + /* Wait for completing optimization and unoptimization */ +@@ -1217,6 +1218,26 @@ __releases(hlist_lock) + } + NOKPROBE_SYMBOL(kretprobe_table_unlock); + ++struct kprobe kprobe_busy = { ++ .addr = (void *) get_kprobe, ++}; ++ ++void kprobe_busy_begin(void) ++{ ++ struct kprobe_ctlblk *kcb; ++ ++ preempt_disable(); ++ __this_cpu_write(current_kprobe, &kprobe_busy); ++ kcb = get_kprobe_ctlblk(); ++ kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++} ++ ++void 
kprobe_busy_end(void) ++{ ++ __this_cpu_write(current_kprobe, NULL); ++ preempt_enable(); ++} ++ + /* + * This function is called from finish_task_switch when task tk becomes dead, + * so that we can recycle any function-return probe instances associated +@@ -1234,6 +1255,8 @@ void kprobe_flush_task(struct task_struct *tk) + /* Early boot. kretprobe_table_locks not yet initialized. */ + return; + ++ kprobe_busy_begin(); ++ + INIT_HLIST_HEAD(&empty_rp); + hash = hash_ptr(tk, KPROBE_HASH_BITS); + head = &kretprobe_inst_table[hash]; +@@ -1247,6 +1270,8 @@ void kprobe_flush_task(struct task_struct *tk) + hlist_del(&ri->hlist); + kfree(ri); + } ++ ++ kprobe_busy_end(); + } + NOKPROBE_SYMBOL(kprobe_flush_task); + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index a60c09e0bda8..304a164f5e7e 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -1022,8 +1022,10 @@ static void blk_add_trace_split(void *ignore, + + __blk_add_trace(bt, bio->bi_iter.bi_sector, + bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, +- BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), +- &rpdu, blk_trace_bio_get_cgid(q, bio)); ++ BLK_TA_SPLIT, ++ blk_status_to_errno(bio->bi_status), ++ sizeof(rpdu), &rpdu, ++ blk_trace_bio_get_cgid(q, bio)); + } + rcu_read_unlock(); + } +@@ -1060,7 +1062,8 @@ static void blk_add_trace_bio_remap(void *ignore, + r.sector_from = cpu_to_be64(from); + + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, +- bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, ++ bio_op(bio), bio->bi_opf, BLK_TA_REMAP, ++ blk_status_to_errno(bio->bi_status), + sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); + } +@@ -1282,21 +1285,10 @@ static inline __u16 t_error(const struct trace_entry *ent) + + static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) + { +- const __u64 *val = pdu_start(ent, has_cg); ++ const __be64 *val = pdu_start(ent, has_cg); + return be64_to_cpu(*val); + } + +-static void get_pdu_remap(const struct trace_entry *ent, +- struct blk_io_trace_remap *r, bool has_cg) +-{ +- const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); +- __u64 sector_from = __r->sector_from; +- +- r->device_from = be32_to_cpu(__r->device_from); +- r->device_to = be32_to_cpu(__r->device_to); +- r->sector_from = be64_to_cpu(sector_from); +-} +- + typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, + bool has_cg); + +@@ -1422,13 +1414,13 @@ static void blk_log_with_error(struct trace_seq *s, + + static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) + { +- struct blk_io_trace_remap r = { .device_from = 0, }; ++ const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); + +- get_pdu_remap(ent, &r, has_cg); + trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", + t_sector(ent), t_sec(ent), +- MAJOR(r.device_from), MINOR(r.device_from), +- (unsigned long long)r.sector_from); ++ MAJOR(be32_to_cpu(__r->device_from)), ++ MINOR(be32_to_cpu(__r->device_from)), ++ be64_to_cpu(__r->sector_from)); + } + + static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) +diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c +index 2c13ecc5bb2c..ed1f3df27260 100644 +--- a/lib/zlib_inflate/inffast.c ++++ b/lib/zlib_inflate/inffast.c +@@ -10,17 +10,6 @@ + + #ifndef ASMINF + +-/* Allow machine dependent optimization for post-increment or pre-increment. 
+- Based on testing to date, +- Pre-increment preferred for: +- - PowerPC G3 (Adler) +- - MIPS R5000 (Randers-Pehrson) +- Post-increment preferred for: +- - none +- No measurable difference: +- - Pentium III (Anderson) +- - M68060 (Nikl) +- */ + union uu { + unsigned short us; + unsigned char b[2]; +@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) + return mm.us; + } + +-#ifdef POSTINC +-# define OFF 0 +-# define PUP(a) *(a)++ +-# define UP_UNALIGNED(a) get_unaligned16((a)++) +-#else +-# define OFF 1 +-# define PUP(a) *++(a) +-# define UP_UNALIGNED(a) get_unaligned16(++(a)) +-#endif +- + /* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is +@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) + + /* copy state to local variables */ + state = (struct inflate_state *)strm->state; +- in = strm->next_in - OFF; ++ in = strm->next_in; + last = in + (strm->avail_in - 5); +- out = strm->next_out - OFF; ++ out = strm->next_out; + beg = out - (start - strm->avail_out); + end = out + (strm->avail_out - 257); + #ifdef INFLATE_STRICT +@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) + input data or output space */ + do { + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = lcode[hold & lmask]; +@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) + bits -= op; + op = (unsigned)(this.op); + if (op == 0) { /* literal */ +- PUP(out) = (unsigned char)(this.val); ++ *out++ = (unsigned char)(this.val); + } + else if (op & 16) { /* length base */ + len = (unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + len += (unsigned)hold & ((1U << op) - 1); +@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) + bits -= op; + } + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = dcode[hold & dmask]; +@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) + dist = (unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + } +@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) + state->mode = BAD; + break; + } +- from = window - OFF; ++ from = window; + if (write == 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } +@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from end of window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); +- from = window - OFF; ++ from = window; + if (write < len) { /* some from start of window */ + op = write; + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while 
(--op); + from = out - dist; /* rest from output */ + } +@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + while (len > 2) { +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); ++ *out++ = *from++; ++ *out++ = *from++; ++ *out++ = *from++; + len -= 3; + } + if (len) { +- PUP(out) = PUP(from); ++ *out++ = *from++; + if (len > 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else { +@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) + from = out - dist; /* copy direct from output */ + /* minimum length is three */ + /* Align out addr */ +- if (!((long)(out - 1 + OFF) & 1)) { +- PUP(out) = PUP(from); ++ if (!((long)(out - 1) & 1)) { ++ *out++ = *from++; + len--; + } +- sout = (unsigned short *)(out - OFF); ++ sout = (unsigned short *)(out); + if (dist > 2) { + unsigned short *sfrom; + +- sfrom = (unsigned short *)(from - OFF); ++ sfrom = (unsigned short *)(from); + loops = len >> 1; + do + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +- PUP(sout) = PUP(sfrom); ++ *sout++ = *sfrom++; + #else +- PUP(sout) = UP_UNALIGNED(sfrom); ++ *sout++ = get_unaligned16(sfrom++); + #endif + while (--loops); +- out = (unsigned char *)sout + OFF; +- from = (unsigned char *)sfrom + OFF; ++ out = (unsigned char *)sout; ++ from = (unsigned char *)sfrom; + } else { /* dist == 1 or dist == 2 */ + unsigned short pat16; + +- pat16 = *(sout-1+OFF); ++ pat16 = *(sout-1); + if (dist == 1) { + union uu mm; + /* copy one char pattern to both bytes */ +@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) + } + loops = len >> 1; + do +- PUP(sout) = pat16; ++ *sout++ = pat16; + while (--loops); +- out = (unsigned char *)sout + OFF; ++ out = (unsigned char *)sout; + } + if (len & 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else if ((op & 64) == 0) { /* 2nd level distance code */ +@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) + hold &= (1U << bits) - 1; + + /* update state and return */ +- strm->next_in = in + OFF; +- strm->next_out = out + OFF; ++ strm->next_in = in; ++ strm->next_out = out; + strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); + strm->avail_out = (unsigned)(out < end ? + 257 + (end - out) : 257 - (out - end)); +diff --git a/net/core/dev.c b/net/core/dev.c +index ed552ad3f783..0aaa1426450f 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -83,6 +83,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); + static unsigned int napi_gen_id = NR_CPUS; + static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); + +-static seqcount_t devnet_rename_seq; ++static DECLARE_RWSEM(devnet_rename_sem); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -898,33 +899,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id); + * @net: network namespace + * @name: a pointer to the buffer where the name will be stored. + * @ifindex: the ifindex of the interface to get the name from. +- * +- * The use of raw_seqcount_begin() and cond_resched() before +- * retrying is required as we want to give the writers a chance +- * to complete when CONFIG_PREEMPT is not set. 
+ */ + int netdev_get_name(struct net *net, char *name, int ifindex) + { + struct net_device *dev; +- unsigned int seq; ++ int ret; + +-retry: +- seq = raw_seqcount_begin(&devnet_rename_seq); ++ down_read(&devnet_rename_sem); + rcu_read_lock(); ++ + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { +- rcu_read_unlock(); +- return -ENODEV; ++ ret = -ENODEV; ++ goto out; + } + + strcpy(name, dev->name); +- rcu_read_unlock(); +- if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); +- goto retry; +- } + +- return 0; ++ ret = 0; ++out: ++ rcu_read_unlock(); ++ up_read(&devnet_rename_sem); ++ return ret; + } + + /** +@@ -1189,10 +1185,10 @@ int dev_change_name(struct net_device *dev, const char *newname) + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return 0; + } + +@@ -1200,7 +1196,7 @@ int dev_change_name(struct net_device *dev, const char *newname) + + err = dev_get_valid_name(net, dev, newname); + if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return err; + } + +@@ -1215,11 +1211,11 @@ rollback: + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return ret; + } + +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1240,7 +1236,7 @@ rollback: + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c +index 2e0a6f92e563..8391c2785550 100644 +--- a/net/sunrpc/addr.c ++++ b/net/sunrpc/addr.c +@@ -81,11 +81,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, + + rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", + IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); +- if (unlikely((size_t)rc > sizeof(scopebuf))) ++ if (unlikely((size_t)rc >= sizeof(scopebuf))) + return 0; + + len += rc; +- if (unlikely(len > buflen)) ++ if (unlikely(len >= buflen)) + return 0; + + strcat(buf, scopebuf); +diff --git a/scripts/mksysmap b/scripts/mksysmap +index a35acc0d0b82..9aa23d15862a 100755 +--- a/scripts/mksysmap ++++ b/scripts/mksysmap +@@ -41,4 +41,4 @@ + # so we just ignore them to let readprofile continue to work. + # (At least sparc64 has __crc_ in the middle). + +-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 ++$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 +diff --git a/security/apparmor/label.c b/security/apparmor/label.c +index ea63710442ae..212a0f39ddae 100644 +--- a/security/apparmor/label.c ++++ b/security/apparmor/label.c +@@ -1536,13 +1536,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label, + + label_for_each(i, label, profile) { + if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { +- if (profile->mode == APPARMOR_UNCONFINED) ++ count++; ++ if (profile == profile->ns->unconfined) + /* special case unconfined so stacks with + * unconfined don't report as mixed. ie. 
+ * profile_foo//&:ns1:unconfined (mixed) + */ + continue; +- count++; + if (mode == -1) + mode = profile->mode; + else if (mode != profile->mode) +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c +index b275743e23cc..0158cde957ee 100644 +--- a/security/selinux/ss/services.c ++++ b/security/selinux/ss/services.c +@@ -2736,8 +2736,12 @@ err: + if (*names) { + for (i = 0; i < *len; i++) + kfree((*names)[i]); ++ kfree(*names); + } + kfree(*values); ++ *len = 0; ++ *names = NULL; ++ *values = NULL; + goto out; + } + +diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c +index 0b1e4b34b299..13c8e6542a2f 100644 +--- a/sound/isa/wavefront/wavefront_synth.c ++++ b/sound/isa/wavefront/wavefront_synth.c +@@ -1175,7 +1175,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) + "alias for %d\n", + header->number, + header->hdr.a.OriginalSample); +- ++ ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + munge_int32 (header->number, &alias_hdr[0], 2); + munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); + munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), +@@ -1206,6 +1209,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) + int num_samples; + unsigned char *msample_hdr; + ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); + if (! msample_hdr) + return -ENOMEM; +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c +index e10e03800cce..6991718d7c8a 100644 +--- a/sound/soc/davinci/davinci-mcasp.c ++++ b/sound/soc/davinci/davinci-mcasp.c +@@ -1747,8 +1747,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) + PTR_ERR(chan)); + return PTR_ERR(chan); + } +- if (WARN_ON(!chan->device || !chan->device->dev)) ++ if (WARN_ON(!chan->device || !chan->device->dev)) { ++ dma_release_channel(chan); + return -EINVAL; ++ } + + if (chan->device->dev->of_node) + ret = of_property_read_string(chan->device->dev->of_node, +diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c +index e1b97e59275a..15d7e6da0555 100644 +--- a/sound/soc/fsl/fsl_asrc_dma.c ++++ b/sound/soc/fsl/fsl_asrc_dma.c +@@ -243,6 +243,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, + ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); + if (ret) { + dev_err(dev, "failed to config DMA channel for Back-End\n"); ++ dma_release_channel(pair->dma_chan[dir]); + return ret; + } + +diff --git a/sound/usb/card.h b/sound/usb/card.h +index ed87cc83eb47..9dbcbb27c28e 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -81,6 +81,10 @@ struct snd_usb_endpoint { + dma_addr_t sync_dma; /* DMA address of syncbuf */ + + unsigned int pipe; /* the data i/o pipe */ ++ unsigned int framesize[2]; /* small/large frame sizes in samples */ ++ unsigned int sample_rem; /* remainder from division fs/fps */ ++ unsigned int sample_accum; /* sample accumulator */ ++ unsigned int fps; /* frames per second */ + unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ + unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ + int freqshift; /* how much to shift the feedback value to get Q16.16 */ +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index 8caf0b57f9c6..841219560872 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -137,12 +137,12 @@ int 
snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep) + + /* + * For streaming based on information derived from sync endpoints, +- * prepare_outbound_urb_sizes() will call next_packet_size() to ++ * prepare_outbound_urb_sizes() will call slave_next_packet_size() to + * determine the number of samples to be sent in the next packet. + * +- * For implicit feedback, next_packet_size() is unused. ++ * For implicit feedback, slave_next_packet_size() is unused. + */ +-int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) ++int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep) + { + unsigned long flags; + int ret; +@@ -159,6 +159,29 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) + return ret; + } + ++/* ++ * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes() ++ * will call next_packet_size() to determine the number of samples to be ++ * sent in the next packet. ++ */ ++int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) ++{ ++ int ret; ++ ++ if (ep->fill_max) ++ return ep->maxframesize; ++ ++ ep->sample_accum += ep->sample_rem; ++ if (ep->sample_accum >= ep->fps) { ++ ep->sample_accum -= ep->fps; ++ ret = ep->framesize[1]; ++ } else { ++ ret = ep->framesize[0]; ++ } ++ ++ return ret; ++} ++ + static void retire_outbound_urb(struct snd_usb_endpoint *ep, + struct snd_urb_ctx *urb_ctx) + { +@@ -203,6 +226,8 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep, + + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; ++ else if (ep->sync_master) ++ counts = snd_usb_endpoint_slave_next_packet_size(ep); + else + counts = snd_usb_endpoint_next_packet_size(ep); + +@@ -889,10 +914,17 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, + ep->maxpacksize = fmt->maxpacksize; + ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); + +- if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) ++ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) { + ep->freqn = get_usb_full_speed_rate(rate); +- else ++ ep->fps = 1000; ++ } else { + ep->freqn = get_usb_high_speed_rate(rate); ++ ep->fps = 8000; ++ } ++ ++ ep->sample_rem = rate % ep->fps; ++ ep->framesize[0] = rate / ep->fps; ++ ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps; + + /* calculate the frequency in 16.16 format */ + ep->freqm = ep->freqn; +@@ -951,6 +983,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) + ep->active_mask = 0; + ep->unlink_mask = 0; + ep->phase = 0; ++ ep->sample_accum = 0; + + snd_usb_endpoint_start_quirk(ep); + +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h +index 63a39d4fa8d8..d23fa0a8c11b 100644 +--- a/sound/usb/endpoint.h ++++ b/sound/usb/endpoint.h +@@ -28,6 +28,7 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep); + void snd_usb_endpoint_free(struct snd_usb_endpoint *ep); + + int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); ++int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep); + int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); + + void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index ff38fca1781b..fd73186d6003 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -1484,6 +1484,8 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, + for (i = 0; i < ctx->packets; i++) { + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; ++ else if (ep->sync_master) ++ counts = snd_usb_endpoint_slave_next_packet_size(ep); + 
else + counts = snd_usb_endpoint_next_packet_size(ep); + +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c +index 429c3e140dc3..35a10b598544 100644 +--- a/tools/perf/builtin-report.c ++++ b/tools/perf/builtin-report.c +@@ -401,8 +401,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report + if (evname != NULL) + ret += fprintf(fp, " of event '%s'", evname); + +- if (symbol_conf.show_ref_callgraph && +- strstr(evname, "call-graph=no")) { ++ if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { + ret += fprintf(fp, ", show reference callgraph"); + } + +diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c +index 5cdfd743447b..900ed4b47899 100644 +--- a/tools/testing/selftests/networking/timestamping/timestamping.c ++++ b/tools/testing/selftests/networking/timestamping/timestamping.c +@@ -332,10 +332,16 @@ int main(int argc, char **argv) + int val; + socklen_t len; + struct timeval next; ++ size_t if_len; + + if (argc < 2) + usage(0); + interface = argv[1]; ++ if_len = strlen(interface); ++ if (if_len >= IFNAMSIZ) { ++ printf("interface name exceeds IFNAMSIZ\n"); ++ exit(1); ++ } + + for (i = 2; i < argc; i++) { + if (!strcasecmp(argv[i], "SO_TIMESTAMP")) +@@ -369,12 +375,12 @@ int main(int argc, char **argv) + bail("socket"); + + memset(&device, 0, sizeof(device)); +- strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); ++ memcpy(device.ifr_name, interface, if_len + 1); + if (ioctl(sock, SIOCGIFADDR, &device) < 0) + bail("getting interface IP address"); + + memset(&hwtstamp, 0, sizeof(hwtstamp)); +- strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); ++ memcpy(hwtstamp.ifr_name, interface, if_len + 1); + hwtstamp.ifr_data = (void *)&hwconfig; + memset(&hwconfig, 0, sizeof(hwconfig)); + hwconfig.tx_type = +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c +index 5d546dcdbc80..b8778960da10 100644 +--- a/tools/testing/selftests/x86/protection_keys.c ++++ b/tools/testing/selftests/x86/protection_keys.c +@@ -24,6 +24,7 @@ + #define _GNU_SOURCE + #include + #include ++#include + #include + #include + #include +@@ -612,10 +613,10 @@ int alloc_random_pkey(void) + int nr_alloced = 0; + int random_index; + memset(alloced_pkeys, 0, sizeof(alloced_pkeys)); ++ srand((unsigned int)time(NULL)); + + /* allocate every possible key and make a note of which ones we got */ + max_nr_pkey_allocs = NR_PKEYS; +- max_nr_pkey_allocs = 1; + for (i = 0; i < max_nr_pkey_allocs; i++) { + int new_pkey = alloc_pkey(); + if (new_pkey < 0) diff --git a/patch/misc/0001-bootsplash.patch b/patch/misc/0001-bootsplash.patch new file mode 100644 index 000000000..924f23f33 --- /dev/null +++ b/patch/misc/0001-bootsplash.patch @@ -0,0 +1,746 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index a74227ad082e..b5633b56391e 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2705,6 +2705,14 @@ S: Supported + F: drivers/net/bonding/ + F: include/uapi/linux/if_bonding.h + ++BOOTSPLASH ++M: Max Staudt ++L: linux-fbdev@vger.kernel.org ++S: Maintained ++F: drivers/video/fbdev/core/bootsplash*.* ++F: drivers/video/fbdev/core/dummycon.c ++F: include/linux/bootsplash.h ++ + BPF (Safe dynamic programs and tools) + M: Alexei Starovoitov + M: Daniel Borkmann +diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig +index 7f1f1fbcef9e..f3ff976266fe 100644 +--- a/drivers/video/console/Kconfig 
++++ b/drivers/video/console/Kconfig
+@@ -151,6 +151,30 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config BOOTSPLASH
++ bool "Bootup splash screen"
++ depends on FRAMEBUFFER_CONSOLE
++ ---help---
++ This option enables the Linux bootsplash screen.
++
++ The bootsplash is a full-screen logo or animation indicating a
++ booting system. It replaces the classic scrolling text with a
++ graphical alternative, similar to other systems.
++
++ Since this is technically implemented as a hook on top of fbcon,
++ it can only work if the FRAMEBUFFER_CONSOLE is enabled and a
++ framebuffer driver is active. Thus, to get a text-free boot,
++ the system needs to boot with vesafb, efifb, or similar.
++
++ Once built into the kernel, the bootsplash needs to be enabled
++ with bootsplash.enabled=1 and a splash file needs to be supplied.
++
++ Further documentation can be found in:
++ Documentation/fb/bootsplash.txt
++
++ If unsure, say N.
++ This is typically used by distributors and system integrators.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index 73493bbd7a15..66895321928e 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -29,3 +29,6 @@ obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
+ obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+ obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+ obj-$(CONFIG_FB_DDC) += fb_ddc.o
++
++obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \
++ dummyblit.o
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+new file mode 100644
+index 000000000000..e449755af268
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -0,0 +1,294 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Main file: Glue code, workers, timer, PM, kernel and userland API)
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include
++#include
++#include
++#include /* dev_warn() */
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include /* console_blanked */
++#include
++#include
++#include
++#include
++#include
++
++#include "bootsplash_internal.h"
++
++
++/*
++ * We only have one splash screen, so let's keep a single
++ * instance of the internal state.
++ */ ++static struct splash_priv splash_state; ++ ++ ++static void splash_callback_redraw_vc(struct work_struct *ignored) ++{ ++ if (console_blanked) ++ return; ++ ++ console_lock(); ++ if (vc_cons[fg_console].d) ++ update_screen(vc_cons[fg_console].d); ++ console_unlock(); ++} ++ ++ ++static bool is_fb_compatible(const struct fb_info *info) ++{ ++ if (!(info->flags & FBINFO_BE_MATH) ++ != !fb_be_math((struct fb_info *)info)) { ++ dev_warn(info->device, ++ "Can't draw on foreign endianness framebuffer.\n"); ++ ++ return false; ++ } ++ ++ if (info->flags & FBINFO_MISC_TILEBLITTING) { ++ dev_warn(info->device, ++ "Can't draw splash on tiling framebuffer.\n"); ++ ++ return false; ++ } ++ ++ if (info->fix.type != FB_TYPE_PACKED_PIXELS ++ || (info->fix.visual != FB_VISUAL_TRUECOLOR ++ && info->fix.visual != FB_VISUAL_DIRECTCOLOR)) { ++ dev_warn(info->device, ++ "Can't draw splash on non-packed or non-truecolor framebuffer.\n"); ++ ++ dev_warn(info->device, ++ " type: %u visual: %u\n", ++ info->fix.type, info->fix.visual); ++ ++ return false; ++ } ++ ++ if (info->var.bits_per_pixel != 16 ++ && info->var.bits_per_pixel != 24 ++ && info->var.bits_per_pixel != 32) { ++ dev_warn(info->device, ++ "We only support drawing on framebuffers with 16, 24, or 32 bpp, not %d.\n", ++ info->var.bits_per_pixel); ++ ++ return false; ++ } ++ ++ return true; ++} ++ ++ ++/* ++ * Called by fbcon_switch() when an instance is activated or refreshed. ++ */ ++void bootsplash_render_full(struct fb_info *info) ++{ ++ if (!is_fb_compatible(info)) ++ return; ++ ++ bootsplash_do_render_background(info); ++} ++ ++ ++/* ++ * External status enquiry and on/off switch ++ */ ++bool bootsplash_would_render_now(void) ++{ ++ return !oops_in_progress ++ && !console_blanked ++ && bootsplash_is_enabled(); ++} ++ ++bool bootsplash_is_enabled(void) ++{ ++ bool was_enabled; ++ ++ /* Make sure we have the newest state */ ++ smp_rmb(); ++ ++ was_enabled = test_bit(0, &splash_state.enabled); ++ ++ return was_enabled; ++} ++ ++void bootsplash_disable(void) ++{ ++ int was_enabled; ++ ++ was_enabled = test_and_clear_bit(0, &splash_state.enabled); ++ ++ if (was_enabled) { ++ if (oops_in_progress) { ++ /* Redraw screen now so we can see a panic */ ++ if (vc_cons[fg_console].d) ++ update_screen(vc_cons[fg_console].d); ++ } else { ++ /* No urgency, redraw at next opportunity */ ++ schedule_work(&splash_state.work_redraw_vc); ++ } ++ } ++} ++ ++void bootsplash_enable(void) ++{ ++ bool was_enabled; ++ ++ if (oops_in_progress) ++ return; ++ ++ was_enabled = test_and_set_bit(0, &splash_state.enabled); ++ ++ if (!was_enabled) ++ schedule_work(&splash_state.work_redraw_vc); ++} ++ ++ ++/* ++ * Userland API via platform device in sysfs ++ */ ++static ssize_t splash_show_enabled(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", bootsplash_is_enabled()); ++} ++ ++static ssize_t splash_store_enabled(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ bool enable; ++ int err; ++ ++ if (!buf || !count) ++ return -EFAULT; ++ ++ err = kstrtobool(buf, &enable); ++ if (err) ++ return err; ++ ++ if (enable) ++ bootsplash_enable(); ++ else ++ bootsplash_disable(); ++ ++ return count; ++} ++ ++static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled); ++ ++ ++static struct attribute *splash_dev_attrs[] = { ++ &dev_attr_enabled.attr, ++ NULL ++}; ++ ++ATTRIBUTE_GROUPS(splash_dev); ++ ++ ++ ++ ++/* ++ * Power management fixup via platform device ++ * ++ * 
When the system is woken from sleep or restored after hibernating, we ++ * cannot expect the screen contents to still be present in video RAM. ++ * Thus, we have to redraw the splash if we're currently active. ++ */ ++static int splash_resume(struct device *device) ++{ ++ if (bootsplash_would_render_now()) ++ schedule_work(&splash_state.work_redraw_vc); ++ ++ return 0; ++} ++ ++static int splash_suspend(struct device *device) ++{ ++ cancel_work_sync(&splash_state.work_redraw_vc); ++ ++ return 0; ++} ++ ++ ++static const struct dev_pm_ops splash_pm_ops = { ++ .thaw = splash_resume, ++ .restore = splash_resume, ++ .resume = splash_resume, ++ .suspend = splash_suspend, ++ .freeze = splash_suspend, ++}; ++ ++static struct platform_driver splash_driver = { ++ .driver = { ++ .name = "bootsplash", ++ .pm = &splash_pm_ops, ++ }, ++}; ++ ++ ++/* ++ * Main init ++ */ ++void bootsplash_init(void) ++{ ++ int ret; ++ ++ /* Initialized already? */ ++ if (splash_state.splash_device) ++ return; ++ ++ ++ /* Register platform device to export user API */ ++ ret = platform_driver_register(&splash_driver); ++ if (ret) { ++ pr_err("platform_driver_register() failed: %d\n", ret); ++ goto err; ++ } ++ ++ splash_state.splash_device ++ = platform_device_alloc("bootsplash", 0); ++ ++ if (!splash_state.splash_device) ++ goto err_driver; ++ ++ splash_state.splash_device->dev.groups = splash_dev_groups; ++ ++ ret = platform_device_add(splash_state.splash_device); ++ if (ret) { ++ pr_err("platform_device_add() failed: %d\n", ret); ++ goto err_device; ++ } ++ ++ ++ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc); ++ ++ return; ++ ++err_device: ++ platform_device_put(splash_state.splash_device); ++ splash_state.splash_device = NULL; ++err_driver: ++ platform_driver_unregister(&splash_driver); ++err: ++ pr_err("Failed to initialize.\n"); ++} +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +new file mode 100644 +index 000000000000..b11da5cb90bf +--- /dev/null ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -0,0 +1,55 @@ ++/* ++ * Kernel based bootsplash. ++ * ++ * (Internal data structures used at runtime) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#ifndef __BOOTSPLASH_INTERNAL_H ++#define __BOOTSPLASH_INTERNAL_H ++ ++ ++#include ++#include ++#include ++#include ++#include ++ ++ ++/* ++ * Runtime types ++ */ ++struct splash_priv { ++ /* ++ * Enabled/disabled state, to be used with atomic bit operations. ++ * Bit 0: 0 = Splash hidden ++ * 1 = Splash shown ++ * ++ * Note: fbcon.c uses this twice, by calling ++ * bootsplash_would_render_now() in set_blitting_type() and ++ * in fbcon_switch(). ++ * This is racy, but eventually consistent: Turning the ++ * splash on/off will cause a redraw, which calls ++ * fbcon_switch(), which calls set_blitting_type(). ++ * So the last on/off toggle will make things consistent. ++ */ ++ unsigned long enabled; ++ ++ /* Our gateway to userland via sysfs */ ++ struct platform_device *splash_device; ++ ++ struct work_struct work_redraw_vc; ++}; ++ ++ ++ ++/* ++ * Rendering functions ++ */ ++void bootsplash_do_render_background(struct fb_info *info); ++ ++#endif +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +new file mode 100644 +index 000000000000..4d7e0117f653 +--- /dev/null ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -0,0 +1,93 @@ ++/* ++ * Kernel based bootsplash. 
++ * ++ * (Rendering functions) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#define pr_fmt(fmt) "bootsplash: " fmt ++ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "bootsplash_internal.h" ++ ++ ++ ++ ++/* ++ * Rendering: Internal drawing routines ++ */ ++ ++ ++/* ++ * Pack pixel into target format and do Big/Little Endian handling. ++ * This would be a good place to handle endianness conversion if necessary. ++ */ ++static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var, ++ u8 red, u8 green, u8 blue) ++{ ++ u32 dstpix; ++ ++ /* Quantize pixel */ ++ red = red >> (8 - dst_var->red.length); ++ green = green >> (8 - dst_var->green.length); ++ blue = blue >> (8 - dst_var->blue.length); ++ ++ /* Pack pixel */ ++ dstpix = red << (dst_var->red.offset) ++ | green << (dst_var->green.offset) ++ | blue << (dst_var->blue.offset); ++ ++ /* ++ * Move packed pixel to the beginning of the memory cell, ++ * so we can memcpy() it out easily ++ */ ++#ifdef __BIG_ENDIAN ++ switch (dst_var->bits_per_pixel) { ++ case 16: ++ dstpix <<= 16; ++ break; ++ case 24: ++ dstpix <<= 8; ++ break; ++ case 32: ++ break; ++ } ++#else ++ /* This is intrinsically unnecessary on Little Endian */ ++#endif ++ ++ return dstpix; ++} ++ ++ ++void bootsplash_do_render_background(struct fb_info *info) ++{ ++ unsigned int x, y; ++ u32 dstpix; ++ u32 dst_octpp = info->var.bits_per_pixel / 8; ++ ++ dstpix = pack_pixel(&info->var, ++ 0, ++ 0, ++ 0); ++ ++ for (y = 0; y < info->var.yres_virtual; y++) { ++ u8 *dstline = info->screen_buffer + (y * info->fix.line_length); ++ ++ for (x = 0; x < info->var.xres_virtual; x++) { ++ memcpy(dstline, &dstpix, dst_octpp); ++ ++ dstline += dst_octpp; ++ } ++ } ++} +diff --git a/drivers/video/fbdev/core/dummyblit.c b/drivers/video/fbdev/core/dummyblit.c +new file mode 100644 +index 000000000000..8c22ff92ce24 +--- /dev/null ++++ b/drivers/video/fbdev/core/dummyblit.c +@@ -0,0 +1,89 @@ ++/* ++ * linux/drivers/video/fbdev/core/dummyblit.c -- Dummy Blitting Operation ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * These functions are used in place of blitblit/tileblit to suppress ++ * fbcon's text output while a splash is shown. ++ * ++ * Only suppressing actual rendering keeps the text buffer in the VC layer ++ * intact and makes it easy to switch back from the bootsplash to a full ++ * text console with a simple redraw (with the original functions in place). 
++ * ++ * Based on linux/drivers/video/fbdev/core/bitblit.c ++ * and linux/drivers/video/fbdev/core/tileblit.c ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include "fbcon.h" ++ ++static void dummy_bmove(struct vc_data *vc, struct fb_info *info, int sy, ++ int sx, int dy, int dx, int height, int width) ++{ ++ ; ++} ++ ++static void dummy_clear(struct vc_data *vc, struct fb_info *info, int sy, ++ int sx, int height, int width) ++{ ++ ; ++} ++ ++static void dummy_putcs(struct vc_data *vc, struct fb_info *info, ++ const unsigned short *s, int count, int yy, int xx, ++ int fg, int bg) ++{ ++ ; ++} ++ ++static void dummy_clear_margins(struct vc_data *vc, struct fb_info *info, ++ int color, int bottom_only) ++{ ++ ; ++} ++ ++static void dummy_cursor(struct vc_data *vc, struct fb_info *info, int mode, ++ int softback_lines, int fg, int bg) ++{ ++ ; ++} ++ ++static int dummy_update_start(struct fb_info *info) ++{ ++ /* ++ * Copied from bitblit.c and tileblit.c ++ * ++ * As of Linux 4.12, nobody seems to care about our return value. ++ */ ++ struct fbcon_ops *ops = info->fbcon_par; ++ int err; ++ ++ err = fb_pan_display(info, &ops->var); ++ ops->var.xoffset = info->var.xoffset; ++ ops->var.yoffset = info->var.yoffset; ++ ops->var.vmode = info->var.vmode; ++ return err; ++} ++ ++void fbcon_set_dummyops(struct fbcon_ops *ops) ++{ ++ ops->bmove = dummy_bmove; ++ ops->clear = dummy_clear; ++ ops->putcs = dummy_putcs; ++ ops->clear_margins = dummy_clear_margins; ++ ops->cursor = dummy_cursor; ++ ops->update_start = dummy_update_start; ++ ops->rotate_font = NULL; ++} ++EXPORT_SYMBOL_GPL(fbcon_set_dummyops); ++ ++MODULE_AUTHOR("Max Staudt "); ++MODULE_DESCRIPTION("Dummy Blitting Operation"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index 04612f938bab..9a39a6fcfe98 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -80,6 +80,7 @@ + #include + + #include "fbcon.h" ++#include + + #ifdef FBCONDEBUG + # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args) +@@ -542,6 +543,8 @@ static int do_fbcon_takeover(int show_logo) + for (i = first_fb_vc; i <= last_fb_vc; i++) + con2fb_map[i] = info_idx; + ++ bootsplash_init(); ++ + err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, + fbcon_is_default); + +@@ -661,6 +664,9 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info) + else { + fbcon_set_rotation(info); + fbcon_set_bitops(ops); ++ ++ if (bootsplash_would_render_now()) ++ fbcon_set_dummyops(ops); + } + } + +@@ -683,6 +689,19 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info) + ops->p = &fb_display[vc->vc_num]; + fbcon_set_rotation(info); + fbcon_set_bitops(ops); ++ ++ /* ++ * Note: ++ * This is *eventually correct*. ++ * Setting the fbcon operations and drawing the splash happen at ++ * different points in time. If the splash is enabled/disabled ++ * in between, then bootsplash_{en,dis}able will schedule a ++ * redraw, which will again render the splash (or not) and set ++ * the correct fbcon ops. ++ * The last run will then be the right one. 
++ */ ++ if (bootsplash_would_render_now()) ++ fbcon_set_dummyops(ops); + } + + static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) +@@ -2184,6 +2203,9 @@ static int fbcon_switch(struct vc_data *vc) + info = registered_fb[con2fb_map[vc->vc_num]]; + ops = info->fbcon_par; + ++ if (bootsplash_would_render_now()) ++ bootsplash_render_full(info); ++ + if (softback_top) { + if (softback_lines) + fbcon_set_origin(vc); +diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h +index 18f3ac144237..45f94347fe5e 100644 +--- a/drivers/video/fbdev/core/fbcon.h ++++ b/drivers/video/fbdev/core/fbcon.h +@@ -214,6 +214,11 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, + #define SCROLL_REDRAW 0x004 + #define SCROLL_PAN_REDRAW 0x005 + ++#ifdef CONFIG_BOOTSPLASH ++extern void fbcon_set_dummyops(struct fbcon_ops *ops); ++#else /* CONFIG_BOOTSPLASH */ ++#define fbcon_set_dummyops(x) ++#endif /* CONFIG_BOOTSPLASH */ + #ifdef CONFIG_FB_TILEBLITTING + extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); + #endif +diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h +new file mode 100644 +index 000000000000..c6dd0b43180d +--- /dev/null ++++ b/include/linux/bootsplash.h +@@ -0,0 +1,43 @@ ++/* ++ * Kernel based bootsplash. ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#ifndef __LINUX_BOOTSPLASH_H ++#define __LINUX_BOOTSPLASH_H ++ ++#include ++ ++ ++#ifdef CONFIG_BOOTSPLASH ++ ++extern void bootsplash_render_full(struct fb_info *info); ++ ++extern bool bootsplash_would_render_now(void); ++ ++extern bool bootsplash_is_enabled(void); ++extern void bootsplash_disable(void); ++extern void bootsplash_enable(void); ++ ++extern void bootsplash_init(void); ++ ++#else /* CONFIG_BOOTSPLASH */ ++ ++#define bootsplash_render_full(x) ++ ++#define bootsplash_would_render_now() (false) ++ ++#define bootsplash_is_enabled() (false) ++#define bootsplash_disable() ++#define bootsplash_enable() ++ ++#define bootsplash_init() ++ ++#endif /* CONFIG_BOOTSPLASH */ ++ ++ ++#endif diff --git a/patch/misc/0002-bootsplash.patch b/patch/misc/0002-bootsplash.patch new file mode 100644 index 000000000..92d62caa7 --- /dev/null +++ b/patch/misc/0002-bootsplash.patch @@ -0,0 +1,669 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index b5633b56391e..5c237445761e 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2712,6 +2712,7 @@ S: Maintained + F: drivers/video/fbdev/core/bootsplash*.* + F: drivers/video/fbdev/core/dummycon.c + F: include/linux/bootsplash.h ++F: include/uapi/linux/bootsplash_file.h + + BPF (Safe dynamic programs and tools) + M: Alexei Starovoitov +diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile +index 66895321928e..6a8d1bab8a01 100644 +--- a/drivers/video/fbdev/core/Makefile ++++ b/drivers/video/fbdev/core/Makefile +@@ -31,4 +31,4 @@ obj-$(CONFIG_FB_SVGALIB) += svgalib.o + obj-$(CONFIG_FB_DDC) += fb_ddc.o + + obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \ +- dummyblit.o ++ bootsplash_load.o dummyblit.o +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index e449755af268..843c5400fefc 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -32,6 +32,7 @@ + #include + + #include "bootsplash_internal.h" ++#include "uapi/linux/bootsplash_file.h" + + + /* +@@ -102,10 +103,17 @@ static bool is_fb_compatible(const struct fb_info *info) + */ + void 
bootsplash_render_full(struct fb_info *info) + { ++ mutex_lock(&splash_state.data_lock); ++ + if (!is_fb_compatible(info)) +- return; ++ goto out; ++ ++ bootsplash_do_render_background(info, splash_state.file); ++ ++ bootsplash_do_render_pictures(info, splash_state.file); + +- bootsplash_do_render_background(info); ++out: ++ mutex_unlock(&splash_state.data_lock); + } + + +@@ -116,6 +124,7 @@ bool bootsplash_would_render_now(void) + { + return !oops_in_progress + && !console_blanked ++ && splash_state.file + && bootsplash_is_enabled(); + } + +@@ -252,6 +261,7 @@ static struct platform_driver splash_driver = { + void bootsplash_init(void) + { + int ret; ++ struct splash_file_priv *fp; + + /* Initialized already? */ + if (splash_state.splash_device) +@@ -280,8 +290,26 @@ void bootsplash_init(void) + } + + ++ mutex_init(&splash_state.data_lock); ++ set_bit(0, &splash_state.enabled); ++ + INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc); + ++ ++ if (!splash_state.bootfile || !strlen(splash_state.bootfile)) ++ return; ++ ++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev, ++ splash_state.bootfile); ++ ++ if (!fp) ++ goto err; ++ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ splash_state.file = fp; ++ mutex_unlock(&splash_state.data_lock); ++ + return; + + err_device: +@@ -292,3 +320,7 @@ void bootsplash_init(void) + err: + pr_err("Failed to initialize.\n"); + } ++ ++ ++module_param_named(bootfile, splash_state.bootfile, charp, 0444); ++MODULE_PARM_DESC(bootfile, "Bootsplash file to load on boot"); +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +index b11da5cb90bf..71e2a27ac0b8 100644 +--- a/drivers/video/fbdev/core/bootsplash_internal.h ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -15,15 +15,43 @@ + + #include + #include ++#include + #include + #include + #include + ++#include "uapi/linux/bootsplash_file.h" ++ + + /* + * Runtime types + */ ++struct splash_blob_priv { ++ struct splash_blob_header *blob_header; ++ const void *data; ++}; ++ ++ ++struct splash_pic_priv { ++ const struct splash_pic_header *pic_header; ++ ++ struct splash_blob_priv *blobs; ++ u16 blobs_loaded; ++}; ++ ++ ++struct splash_file_priv { ++ const struct firmware *fw; ++ const struct splash_file_header *header; ++ ++ struct splash_pic_priv *pics; ++}; ++ ++ + struct splash_priv { ++ /* Bootup and runtime state */ ++ char *bootfile; ++ + /* + * Enabled/disabled state, to be used with atomic bit operations. 
+ * Bit 0: 0 = Splash hidden +@@ -43,6 +71,13 @@ struct splash_priv { + struct platform_device *splash_device; + + struct work_struct work_redraw_vc; ++ ++ /* Splash data structures including lock for everything below */ ++ struct mutex data_lock; ++ ++ struct fb_info *splash_fb; ++ ++ struct splash_file_priv *file; + }; + + +@@ -50,6 +85,14 @@ struct splash_priv { + /* + * Rendering functions + */ +-void bootsplash_do_render_background(struct fb_info *info); ++void bootsplash_do_render_background(struct fb_info *info, ++ const struct splash_file_priv *fp); ++void bootsplash_do_render_pictures(struct fb_info *info, ++ const struct splash_file_priv *fp); ++ ++ ++void bootsplash_free_file(struct splash_file_priv *fp); ++struct splash_file_priv *bootsplash_load_firmware(struct device *device, ++ const char *path); + + #endif +diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c +new file mode 100644 +index 000000000000..fd807571ab7d +--- /dev/null ++++ b/drivers/video/fbdev/core/bootsplash_load.c +@@ -0,0 +1,225 @@ ++/* ++ * Kernel based bootsplash. ++ * ++ * (Loading and freeing functions) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#define pr_fmt(fmt) "bootsplash: " fmt ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "bootsplash_internal.h" ++#include "uapi/linux/bootsplash_file.h" ++ ++ ++ ++ ++/* ++ * Free all vmalloc()'d resources describing a splash file. ++ */ ++void bootsplash_free_file(struct splash_file_priv *fp) ++{ ++ if (!fp) ++ return; ++ ++ if (fp->pics) { ++ unsigned int i; ++ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ ++ if (pp->blobs) ++ vfree(pp->blobs); ++ } ++ ++ vfree(fp->pics); ++ } ++ ++ release_firmware(fp->fw); ++ vfree(fp); ++} ++ ++ ++ ++ ++/* ++ * Load a splash screen from a "firmware" file. ++ * ++ * Parsing, and sanity checks. 
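As a reading aid, here is a minimal sketch (not part of the patch series; the helper name first_frame_of_pic0 is made up) of how the runtime structures declared above end up referencing the loaded firmware image once bootsplash_load_firmware() has returned. Nothing is copied out of fw->data, which is why bootsplash_free_file() only has to free the two arrays and release the firmware:

    /* Sketch only: assumes bootsplash_internal.h from this series. */
    #include "bootsplash_internal.h"

    static const void *first_frame_of_pic0(const struct splash_file_priv *fp)
    {
        const struct splash_pic_priv *pp;

        if (!fp || !fp->header->num_pics)
            return NULL;

        pp = &fp->pics[0];              /* one entry per picture header */
        if (!pp->blobs_loaded)
            return NULL;

        /* Both blob_header and data point straight into fp->fw->data. */
        return pp->blobs[0].data;
    }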
++ */ ++#ifdef __BIG_ENDIAN ++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_BE ++#else ++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_LE ++#endif ++ ++struct splash_file_priv *bootsplash_load_firmware(struct device *device, ++ const char *path) ++{ ++ const struct firmware *fw; ++ struct splash_file_priv *fp; ++ unsigned int i; ++ const u8 *walker; ++ ++ if (request_firmware(&fw, path, device)) ++ return NULL; ++ ++ if (fw->size < sizeof(struct splash_file_header) ++ || memcmp(fw->data, BOOTSPLASH_MAGIC, sizeof(fp->header->id))) { ++ pr_err("Not a bootsplash file.\n"); ++ ++ release_firmware(fw); ++ return NULL; ++ } ++ ++ fp = vzalloc(sizeof(struct splash_file_priv)); ++ if (!fp) { ++ release_firmware(fw); ++ return NULL; ++ } ++ ++ pr_info("Loading splash file (%li bytes)\n", fw->size); ++ ++ fp->fw = fw; ++ fp->header = (struct splash_file_header *)fw->data; ++ ++ /* Sanity checks */ ++ if (fp->header->version != BOOTSPLASH_VERSION) { ++ pr_err("Loaded v%d file, but we only support version %d\n", ++ fp->header->version, ++ BOOTSPLASH_VERSION); ++ ++ goto err; ++ } ++ ++ if (fw->size < sizeof(struct splash_file_header) ++ + fp->header->num_pics ++ * sizeof(struct splash_pic_header) ++ + fp->header->num_blobs ++ * sizeof(struct splash_blob_header)) { ++ pr_err("File incomplete.\n"); ++ ++ goto err; ++ } ++ ++ /* Read picture headers */ ++ if (fp->header->num_pics) { ++ fp->pics = vzalloc(fp->header->num_pics ++ * sizeof(struct splash_pic_priv)); ++ if (!fp->pics) ++ goto err; ++ } ++ ++ walker = fw->data + sizeof(struct splash_file_header); ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ struct splash_pic_header *ph = (void *)walker; ++ ++ pr_debug("Picture %u: Size %ux%u\n", i, ph->width, ph->height); ++ ++ if (ph->num_blobs < 1) { ++ pr_err("Picture %u: Zero blobs? 
Aborting load.\n", i); ++ goto err; ++ } ++ ++ pp->pic_header = ph; ++ pp->blobs = vzalloc(ph->num_blobs ++ * sizeof(struct splash_blob_priv)); ++ if (!pp->blobs) ++ goto err; ++ ++ walker += sizeof(struct splash_pic_header); ++ } ++ ++ /* Read blob headers */ ++ for (i = 0; i < fp->header->num_blobs; i++) { ++ struct splash_blob_header *bh = (void *)walker; ++ struct splash_pic_priv *pp; ++ ++ if (walker + sizeof(struct splash_blob_header) ++ > fw->data + fw->size) ++ goto err; ++ ++ walker += sizeof(struct splash_blob_header); ++ ++ if (walker + bh->length > fw->data + fw->size) ++ goto err; ++ ++ if (bh->picture_id >= fp->header->num_pics) ++ goto nextblob; ++ ++ pp = &fp->pics[bh->picture_id]; ++ ++ pr_debug("Blob %u, pic %u, blobs_loaded %u, num_blobs %u.\n", ++ i, bh->picture_id, ++ pp->blobs_loaded, pp->pic_header->num_blobs); ++ ++ if (pp->blobs_loaded >= pp->pic_header->num_blobs) ++ goto nextblob; ++ ++ switch (bh->type) { ++ case 0: ++ /* Raw 24-bit packed pixels */ ++ if (bh->length != pp->pic_header->width ++ * pp->pic_header->height * 3) { ++ pr_err("Blob %u, type 1: Length doesn't match picture.\n", ++ i); ++ ++ goto err; ++ } ++ break; ++ default: ++ pr_warn("Blob %u, unknown type %u.\n", i, bh->type); ++ goto nextblob; ++ } ++ ++ pp->blobs[pp->blobs_loaded].blob_header = bh; ++ pp->blobs[pp->blobs_loaded].data = walker; ++ pp->blobs_loaded++; ++ ++nextblob: ++ walker += bh->length; ++ if (bh->length % 16) ++ walker += 16 - (bh->length % 16); ++ } ++ ++ if (walker != fw->data + fw->size) ++ pr_warn("Trailing data in splash file.\n"); ++ ++ /* Walk over pictures and ensure all blob slots are filled */ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ ++ if (pp->blobs_loaded != pp->pic_header->num_blobs) { ++ pr_err("Picture %u doesn't have all blob slots filled.\n", ++ i); ++ ++ goto err; ++ } ++ } ++ ++ pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n", ++ fw->size, ++ fp->header->num_pics, ++ fp->header->num_blobs); ++ ++ return fp; ++ ++ ++err: ++ bootsplash_free_file(fp); ++ return NULL; ++} +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 4d7e0117f653..2ae36949d0e3 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -19,6 +19,7 @@ + #include + + #include "bootsplash_internal.h" ++#include "uapi/linux/bootsplash_file.h" + + + +@@ -70,16 +71,69 @@ static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var, + } + + +-void bootsplash_do_render_background(struct fb_info *info) ++/* ++ * Copy from source and blend into the destination picture. ++ * Currently assumes that the source picture is 24bpp. ++ * Currently assumes that the destination is <= 32bpp. 
++ */ ++static int splash_convert_to_fb(u8 *dst, ++ const struct fb_var_screeninfo *dst_var, ++ unsigned int dst_stride, ++ unsigned int dst_xoff, ++ unsigned int dst_yoff, ++ const u8 *src, ++ unsigned int src_width, ++ unsigned int src_height) ++{ ++ unsigned int x, y; ++ unsigned int src_stride = 3 * src_width; /* Assume 24bpp packed */ ++ u32 dst_octpp = dst_var->bits_per_pixel / 8; ++ ++ dst_xoff += dst_var->xoffset; ++ dst_yoff += dst_var->yoffset; ++ ++ /* Copy with stride and pixel size adjustment */ ++ for (y = 0; ++ y < src_height && y + dst_yoff < dst_var->yres_virtual; ++ y++) { ++ const u8 *srcline = src + (y * src_stride); ++ u8 *dstline = dst + ((y + dst_yoff) * dst_stride) ++ + (dst_xoff * dst_octpp); ++ ++ for (x = 0; ++ x < src_width && x + dst_xoff < dst_var->xres_virtual; ++ x++) { ++ u8 red, green, blue; ++ u32 dstpix; ++ ++ /* Read pixel */ ++ red = *srcline++; ++ green = *srcline++; ++ blue = *srcline++; ++ ++ /* Write pixel */ ++ dstpix = pack_pixel(dst_var, red, green, blue); ++ memcpy(dstline, &dstpix, dst_octpp); ++ ++ dstline += dst_octpp; ++ } ++ } ++ ++ return 0; ++} ++ ++ ++void bootsplash_do_render_background(struct fb_info *info, ++ const struct splash_file_priv *fp) + { + unsigned int x, y; + u32 dstpix; + u32 dst_octpp = info->var.bits_per_pixel / 8; + + dstpix = pack_pixel(&info->var, +- 0, +- 0, +- 0); ++ fp->header->bg_red, ++ fp->header->bg_green, ++ fp->header->bg_blue); + + for (y = 0; y < info->var.yres_virtual; y++) { + u8 *dstline = info->screen_buffer + (y * info->fix.line_length); +@@ -91,3 +145,44 @@ void bootsplash_do_render_background(struct fb_info *info) + } + } + } ++ ++ ++void bootsplash_do_render_pictures(struct fb_info *info, ++ const struct splash_file_priv *fp) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_blob_priv *bp; ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ long dst_xoff, dst_yoff; ++ ++ if (pp->blobs_loaded < 1) ++ continue; ++ ++ bp = &pp->blobs[0]; ++ ++ if (!bp || bp->blob_header->type != 0) ++ continue; ++ ++ dst_xoff = (info->var.xres - pp->pic_header->width) / 2; ++ dst_yoff = (info->var.yres - pp->pic_header->height) / 2; ++ ++ if (dst_xoff < 0 ++ || dst_yoff < 0 ++ || dst_xoff + pp->pic_header->width > info->var.xres ++ || dst_yoff + pp->pic_header->height > info->var.yres) { ++ pr_info_once("Picture %u is out of bounds at current resolution: %dx%d\n" ++ "(this will only be printed once every reboot)\n", ++ i, info->var.xres, info->var.yres); ++ ++ continue; ++ } ++ ++ /* Draw next splash frame */ ++ splash_convert_to_fb(info->screen_buffer, &info->var, ++ info->fix.line_length, dst_xoff, dst_yoff, ++ bp->data, ++ pp->pic_header->width, pp->pic_header->height); ++ } ++} +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +new file mode 100644 +index 000000000000..89dc9cca8f0c +--- /dev/null ++++ b/include/uapi/linux/bootsplash_file.h +@@ -0,0 +1,118 @@ ++/* ++ * Kernel based bootsplash. 
++ * ++ * (File format) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note ++ */ ++ ++#ifndef __BOOTSPLASH_FILE_H ++#define __BOOTSPLASH_FILE_H ++ ++ ++#define BOOTSPLASH_VERSION 55561 ++ ++ ++#include ++#include ++ ++ ++/* ++ * On-disk types ++ * ++ * A splash file consists of: ++ * - One single 'struct splash_file_header' ++ * - An array of 'struct splash_pic_header' ++ * - An array of raw data blocks, each padded to 16 bytes and ++ * preceded by a 'struct splash_blob_header' ++ * ++ * A single-frame splash may look like this: ++ * ++ * +--------------------+ ++ * | | ++ * | splash_file_header | ++ * | -> num_blobs = 1 | ++ * | -> num_pics = 1 | ++ * | | ++ * +--------------------+ ++ * | | ++ * | splash_pic_header | ++ * | | ++ * +--------------------+ ++ * | | ++ * | splash_blob_header | ++ * | -> type = 0 | ++ * | -> picture_id = 0 | ++ * | | ++ * | (raw RGB data) | ++ * | (pad to 16 bytes) | ++ * | | ++ * +--------------------+ ++ * ++ * All multi-byte values are stored on disk in the native format ++ * expected by the system the file will be used on. ++ */ ++#define BOOTSPLASH_MAGIC_BE "Linux bootsplash" ++#define BOOTSPLASH_MAGIC_LE "hsalpstoob xuniL" ++ ++struct splash_file_header { ++ uint8_t id[16]; /* "Linux bootsplash" (no trailing NUL) */ ++ ++ /* Splash file format version to avoid clashes */ ++ uint16_t version; ++ ++ /* The background color */ ++ uint8_t bg_red; ++ uint8_t bg_green; ++ uint8_t bg_blue; ++ uint8_t bg_reserved; ++ ++ /* ++ * Number of pic/blobs so we can allocate memory for internal ++ * structures ahead of time when reading the file ++ */ ++ uint16_t num_blobs; ++ uint8_t num_pics; ++ ++ uint8_t padding[103]; ++} __attribute__((__packed__)); ++ ++ ++struct splash_pic_header { ++ uint16_t width; ++ uint16_t height; ++ ++ /* ++ * Number of data packages associated with this picture. ++ * Currently, the only use for more than 1 is for animations. ++ */ ++ uint8_t num_blobs; ++ ++ uint8_t padding[27]; ++} __attribute__((__packed__)); ++ ++ ++struct splash_blob_header { ++ /* Length of the data block in bytes. */ ++ uint32_t length; ++ ++ /* ++ * Type of the contents. ++ * 0 - Raw RGB data. ++ */ ++ uint16_t type; ++ ++ /* ++ * Picture this blob is associated with. ++ * Blobs will be added to a picture in the order they are ++ * found in the file. 
++ */ ++ uint8_t picture_id; ++ ++ uint8_t padding[9]; ++} __attribute__((__packed__)); ++ ++#endif diff --git a/patch/misc/0003-bootsplash.patch b/patch/misc/0003-bootsplash.patch new file mode 100644 index 000000000..216953762 --- /dev/null +++ b/patch/misc/0003-bootsplash.patch @@ -0,0 +1,66 @@ +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 843c5400fefc..815b007f81ca 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -112,6 +112,8 @@ void bootsplash_render_full(struct fb_info *info) + + bootsplash_do_render_pictures(info, splash_state.file); + ++ bootsplash_do_render_flush(info); ++ + out: + mutex_unlock(&splash_state.data_lock); + } +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +index 71e2a27ac0b8..0acb383aa4e3 100644 +--- a/drivers/video/fbdev/core/bootsplash_internal.h ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -89,6 +89,7 @@ void bootsplash_do_render_background(struct fb_info *info, + const struct splash_file_priv *fp); + void bootsplash_do_render_pictures(struct fb_info *info, + const struct splash_file_priv *fp); ++void bootsplash_do_render_flush(struct fb_info *info); + + + void bootsplash_free_file(struct splash_file_priv *fp); +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 2ae36949d0e3..8c09c306ff67 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -186,3 +186,36 @@ void bootsplash_do_render_pictures(struct fb_info *info, + pp->pic_header->width, pp->pic_header->height); + } + } ++ ++ ++void bootsplash_do_render_flush(struct fb_info *info) ++{ ++ /* ++ * FB drivers using deferred_io (such as Xen) need to sync the ++ * screen after modifying its contents. When the FB is mmap()ed ++ * from userspace, this happens via a dirty pages callback, but ++ * when modifying the FB from the kernel, there is no such thing. ++ * ++ * So let's issue a fake fb_copyarea (copying the FB onto itself) ++ * to trick the FB driver into syncing the screen. ++ * ++ * A few DRM drivers' FB implementations are broken by not using ++ * deferred_io when they really should - we match on the known ++ * bad ones manually for now. 
++ */ ++ if (info->fbdefio ++ || !strcmp(info->fix.id, "astdrmfb") ++ || !strcmp(info->fix.id, "cirrusdrmfb") ++ || !strcmp(info->fix.id, "mgadrmfb")) { ++ struct fb_copyarea area; ++ ++ area.dx = 0; ++ area.dy = 0; ++ area.width = info->var.xres; ++ area.height = info->var.yres; ++ area.sx = 0; ++ area.sy = 0; ++ ++ info->fbops->fb_copyarea(info, &area); ++ } ++} diff --git a/patch/misc/0004-bootsplash.patch b/patch/misc/0004-bootsplash.patch new file mode 100644 index 000000000..7eb54aff7 --- /dev/null +++ b/patch/misc/0004-bootsplash.patch @@ -0,0 +1,215 @@ +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 8c09c306ff67..07e3a4eab811 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -155,6 +155,7 @@ void bootsplash_do_render_pictures(struct fb_info *info, + for (i = 0; i < fp->header->num_pics; i++) { + struct splash_blob_priv *bp; + struct splash_pic_priv *pp = &fp->pics[i]; ++ const struct splash_pic_header *ph = pp->pic_header; + long dst_xoff, dst_yoff; + + if (pp->blobs_loaded < 1) +@@ -165,8 +166,139 @@ void bootsplash_do_render_pictures(struct fb_info *info, + if (!bp || bp->blob_header->type != 0) + continue; + +- dst_xoff = (info->var.xres - pp->pic_header->width) / 2; +- dst_yoff = (info->var.yres - pp->pic_header->height) / 2; ++ switch (ph->position) { ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_LEFT: ++ dst_xoff = 0; ++ dst_yoff = 0; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = 0; ++ ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = 0; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_LEFT: ++ dst_xoff = 0 + ph->position_offset; ++ dst_yoff = info->var.yres - pp->pic_header->height ++ - ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_LEFT: ++ dst_xoff = 0; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ break; ++ ++ case SPLASH_CORNER_TOP_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_CORNER_TOP: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_yoff -= 
ph->position_offset; ++ break; ++ case SPLASH_CORNER_TOP_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_CORNER_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ break; ++ ++ default: ++ /* As a fallback, center the picture. */ ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ break; ++ } + + if (dst_xoff < 0 + || dst_yoff < 0 +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +index 89dc9cca8f0c..71cedcc68933 100644 +--- a/include/uapi/linux/bootsplash_file.h ++++ b/include/uapi/linux/bootsplash_file.h +@@ -91,7 +91,32 @@ struct splash_pic_header { + */ + uint8_t num_blobs; + +- uint8_t padding[27]; ++ /* ++ * Corner to move the picture to / from. ++ * 0x00 - Top left ++ * 0x01 - Top ++ * 0x02 - Top right ++ * 0x03 - Right ++ * 0x04 - Bottom right ++ * 0x05 - Bottom ++ * 0x06 - Bottom left ++ * 0x07 - Left ++ * ++ * Flags: ++ * 0x10 - Calculate offset from the corner towards the center, ++ * rather than from the center towards the corner ++ */ ++ uint8_t position; ++ ++ /* ++ * Pixel offset from the selected position. ++ * Example: If the picture is in the top right corner, it will ++ * be placed position_offset pixels from the top and ++ * position_offset pixels from the right margin. 
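The corner-plus-flag placement rules spelled out case by case in the switch above can be condensed into a short helper. This is only an illustration (the function name splash_pos_sketch is made up; it is not code from the series): each corner code maps to a horizontal and a vertical direction, and SPLASH_POS_FLAG_CORNER chooses whether position_offset moves the picture inwards from that corner or edge, or outwards from the centre towards it.

    /* Illustrative only: equivalent placement logic for the scheme above. */
    static void splash_pos_sketch(unsigned int xres, unsigned int yres,
                                  unsigned int width, unsigned int height,
                                  unsigned int position, unsigned int offset,
                                  long *xoff, long *yoff)
    {
        /* Directions for corner codes 0..7: TOP_LEFT, TOP, TOP_RIGHT,
         * RIGHT, BOTTOM_RIGHT, BOTTOM, BOTTOM_LEFT, LEFT. */
        static const int dx[8] = { -1,  0,  1, 1, 1, 0, -1, -1 };
        static const int dy[8] = { -1, -1, -1, 0, 1, 1,  1,  0 };
        unsigned int corner = position & 0x0f;

        if (corner > 7) {       /* unknown corner: fall back to centering */
            *xoff = ((long)xres - (long)width) / 2;
            *yoff = ((long)yres - (long)height) / 2;
            return;
        }

        if (position & 0x10) {  /* SPLASH_POS_FLAG_CORNER */
            /* Anchor at the corner/edge, then move offset pixels inwards. */
            *xoff = dx[corner] < 0 ? 0 :
                    dx[corner] > 0 ? (long)xres - (long)width
                                   : ((long)xres - (long)width) / 2;
            *yoff = dy[corner] < 0 ? 0 :
                    dy[corner] > 0 ? (long)yres - (long)height
                                   : ((long)yres - (long)height) / 2;
            *xoff -= dx[corner] * (long)offset;
            *yoff -= dy[corner] * (long)offset;
        } else {
            /* Start centred, then move offset pixels towards the corner. */
            *xoff = ((long)xres - (long)width) / 2 + dx[corner] * (long)offset;
            *yoff = ((long)yres - (long)height) / 2 + dy[corner] * (long)offset;
        }
    }

Out-of-bounds results are still rejected by the bounds check that follows the switch, exactly as in the patch itself.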
++ */ ++ uint16_t position_offset; ++ ++ uint8_t padding[24]; + } __attribute__((__packed__)); + + +@@ -115,4 +140,22 @@ struct splash_blob_header { + uint8_t padding[9]; + } __attribute__((__packed__)); + ++ ++ ++ ++/* ++ * Enums for on-disk types ++ */ ++enum splash_position { ++ SPLASH_CORNER_TOP_LEFT = 0, ++ SPLASH_CORNER_TOP = 1, ++ SPLASH_CORNER_TOP_RIGHT = 2, ++ SPLASH_CORNER_RIGHT = 3, ++ SPLASH_CORNER_BOTTOM_RIGHT = 4, ++ SPLASH_CORNER_BOTTOM = 5, ++ SPLASH_CORNER_BOTTOM_LEFT = 6, ++ SPLASH_CORNER_LEFT = 7, ++ SPLASH_POS_FLAG_CORNER = 0x10, ++}; ++ + #endif diff --git a/patch/misc/0005-bootsplash.patch b/patch/misc/0005-bootsplash.patch new file mode 100644 index 000000000..2785c5e65 --- /dev/null +++ b/patch/misc/0005-bootsplash.patch @@ -0,0 +1,327 @@ +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 815b007f81ca..c8642142cfea 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -53,6 +53,14 @@ static void splash_callback_redraw_vc(struct work_struct *ignored) + console_unlock(); + } + ++static void splash_callback_animation(struct work_struct *ignored) ++{ ++ if (bootsplash_would_render_now()) { ++ /* This will also re-schedule this delayed worker */ ++ splash_callback_redraw_vc(ignored); ++ } ++} ++ + + static bool is_fb_compatible(const struct fb_info *info) + { +@@ -103,17 +111,44 @@ static bool is_fb_compatible(const struct fb_info *info) + */ + void bootsplash_render_full(struct fb_info *info) + { ++ bool is_update = false; ++ + mutex_lock(&splash_state.data_lock); + +- if (!is_fb_compatible(info)) +- goto out; ++ /* ++ * If we've painted on this FB recently, we don't have to do ++ * the sanity checks and background drawing again. ++ */ ++ if (splash_state.splash_fb == info) ++ is_update = true; ++ ++ ++ if (!is_update) { ++ /* Check whether we actually support this FB. 
*/ ++ splash_state.splash_fb = NULL; ++ ++ if (!is_fb_compatible(info)) ++ goto out; ++ ++ /* Draw the background only once */ ++ bootsplash_do_render_background(info, splash_state.file); + +- bootsplash_do_render_background(info, splash_state.file); ++ /* Mark this FB as last seen */ ++ splash_state.splash_fb = info; ++ } + +- bootsplash_do_render_pictures(info, splash_state.file); ++ bootsplash_do_render_pictures(info, splash_state.file, is_update); + + bootsplash_do_render_flush(info); + ++ bootsplash_do_step_animations(splash_state.file); ++ ++ /* Schedule update for animated splash screens */ ++ if (splash_state.file->frame_ms > 0) ++ schedule_delayed_work(&splash_state.dwork_animation, ++ msecs_to_jiffies( ++ splash_state.file->frame_ms)); ++ + out: + mutex_unlock(&splash_state.data_lock); + } +@@ -169,8 +204,14 @@ void bootsplash_enable(void) + + was_enabled = test_and_set_bit(0, &splash_state.enabled); + +- if (!was_enabled) ++ if (!was_enabled) { ++ /* Force a full redraw when the splash is re-activated */ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ + schedule_work(&splash_state.work_redraw_vc); ++ } + } + + +@@ -227,6 +268,14 @@ ATTRIBUTE_GROUPS(splash_dev); + */ + static int splash_resume(struct device *device) + { ++ /* ++ * Force full redraw on resume since we've probably lost the ++ * framebuffer's contents meanwhile ++ */ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ + if (bootsplash_would_render_now()) + schedule_work(&splash_state.work_redraw_vc); + +@@ -235,6 +284,7 @@ static int splash_resume(struct device *device) + + static int splash_suspend(struct device *device) + { ++ cancel_delayed_work_sync(&splash_state.dwork_animation); + cancel_work_sync(&splash_state.work_redraw_vc); + + return 0; +@@ -296,6 +346,8 @@ void bootsplash_init(void) + set_bit(0, &splash_state.enabled); + + INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc); ++ INIT_DELAYED_WORK(&splash_state.dwork_animation, ++ splash_callback_animation); + + + if (!splash_state.bootfile || !strlen(splash_state.bootfile)) +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +index 0acb383aa4e3..b3a74835d90f 100644 +--- a/drivers/video/fbdev/core/bootsplash_internal.h ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -37,6 +37,8 @@ struct splash_pic_priv { + + struct splash_blob_priv *blobs; + u16 blobs_loaded; ++ ++ u16 anim_nextframe; + }; + + +@@ -45,6 +47,12 @@ struct splash_file_priv { + const struct splash_file_header *header; + + struct splash_pic_priv *pics; ++ ++ /* ++ * A local copy of the frame delay in the header. ++ * We modify it to keep the code simple. 
++ */ ++ u16 frame_ms; + }; + + +@@ -71,6 +79,7 @@ struct splash_priv { + struct platform_device *splash_device; + + struct work_struct work_redraw_vc; ++ struct delayed_work dwork_animation; + + /* Splash data structures including lock for everything below */ + struct mutex data_lock; +@@ -88,8 +97,10 @@ struct splash_priv { + void bootsplash_do_render_background(struct fb_info *info, + const struct splash_file_priv *fp); + void bootsplash_do_render_pictures(struct fb_info *info, +- const struct splash_file_priv *fp); ++ const struct splash_file_priv *fp, ++ bool is_update); + void bootsplash_do_render_flush(struct fb_info *info); ++void bootsplash_do_step_animations(struct splash_file_priv *fp); + + + void bootsplash_free_file(struct splash_file_priv *fp); +diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c +index fd807571ab7d..1f661b2d4cc9 100644 +--- a/drivers/video/fbdev/core/bootsplash_load.c ++++ b/drivers/video/fbdev/core/bootsplash_load.c +@@ -71,6 +71,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + { + const struct firmware *fw; + struct splash_file_priv *fp; ++ bool have_anim = false; + unsigned int i; + const u8 *walker; + +@@ -135,6 +136,13 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + goto err; + } + ++ if (ph->anim_type > SPLASH_ANIM_LOOP_FORWARD) { ++ pr_warn("Picture %u: Unsupported animation type %u.\n", ++ i, ph->anim_type); ++ ++ ph->anim_type = SPLASH_ANIM_NONE; ++ } ++ + pp->pic_header = ph; + pp->blobs = vzalloc(ph->num_blobs + * sizeof(struct splash_blob_priv)); +@@ -202,6 +210,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + /* Walk over pictures and ensure all blob slots are filled */ + for (i = 0; i < fp->header->num_pics; i++) { + struct splash_pic_priv *pp = &fp->pics[i]; ++ const struct splash_pic_header *ph = pp->pic_header; + + if (pp->blobs_loaded != pp->pic_header->num_blobs) { + pr_err("Picture %u doesn't have all blob slots filled.\n", +@@ -209,8 +218,20 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + + goto err; + } ++ ++ if (ph->anim_type ++ && ph->num_blobs > 1 ++ && ph->anim_loop < pp->blobs_loaded) ++ have_anim = true; + } + ++ if (!have_anim) ++ /* Disable animation timer if there is nothing to animate */ ++ fp->frame_ms = 0; ++ else ++ /* Enforce minimum delay between frames */ ++ fp->frame_ms = max((u16)20, fp->header->frame_ms); ++ + pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n", + fw->size, + fp->header->num_pics, +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 07e3a4eab811..76033606ca8a 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -148,7 +148,8 @@ void bootsplash_do_render_background(struct fb_info *info, + + + void bootsplash_do_render_pictures(struct fb_info *info, +- const struct splash_file_priv *fp) ++ const struct splash_file_priv *fp, ++ bool is_update) + { + unsigned int i; + +@@ -161,7 +162,11 @@ void bootsplash_do_render_pictures(struct fb_info *info, + if (pp->blobs_loaded < 1) + continue; + +- bp = &pp->blobs[0]; ++ /* Skip static pictures when refreshing animations */ ++ if (ph->anim_type == SPLASH_ANIM_NONE && is_update) ++ continue; ++ ++ bp = &pp->blobs[pp->anim_nextframe]; + + if (!bp || bp->blob_header->type != 0) + continue; +@@ -351,3 +356,24 @@ void bootsplash_do_render_flush(struct fb_info *info) + 
info->fbops->fb_copyarea(info, &area); + } + } ++ ++ ++void bootsplash_do_step_animations(struct splash_file_priv *fp) ++{ ++ unsigned int i; ++ ++ /* Step every animation once */ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ ++ if (pp->blobs_loaded < 2 ++ || pp->pic_header->anim_loop > pp->blobs_loaded) ++ continue; ++ ++ if (pp->pic_header->anim_type == SPLASH_ANIM_LOOP_FORWARD) { ++ pp->anim_nextframe++; ++ if (pp->anim_nextframe >= pp->pic_header->num_blobs) ++ pp->anim_nextframe = pp->pic_header->anim_loop; ++ } ++ } ++} +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +index 71cedcc68933..b3af0a3c6487 100644 +--- a/include/uapi/linux/bootsplash_file.h ++++ b/include/uapi/linux/bootsplash_file.h +@@ -77,7 +77,17 @@ struct splash_file_header { + uint16_t num_blobs; + uint8_t num_pics; + +- uint8_t padding[103]; ++ uint8_t unused_1; ++ ++ /* ++ * Milliseconds to wait before painting the next frame in ++ * an animation. ++ * This is actually a minimum, as the system is allowed to ++ * stall for longer between frames. ++ */ ++ uint16_t frame_ms; ++ ++ uint8_t padding[100]; + } __attribute__((__packed__)); + + +@@ -116,7 +126,23 @@ struct splash_pic_header { + */ + uint16_t position_offset; + +- uint8_t padding[24]; ++ /* ++ * Animation type. ++ * 0 - off ++ * 1 - forward loop ++ */ ++ uint8_t anim_type; ++ ++ /* ++ * Animation loop point. ++ * Actual meaning depends on animation type: ++ * Type 0 - Unused ++ * 1 - Frame at which to restart the forward loop ++ * (allowing for "intro" frames) ++ */ ++ uint8_t anim_loop; ++ ++ uint8_t padding[22]; + } __attribute__((__packed__)); + + +@@ -158,4 +184,9 @@ enum splash_position { + SPLASH_POS_FLAG_CORNER = 0x10, + }; + ++enum splash_anim_type { ++ SPLASH_ANIM_NONE = 0, ++ SPLASH_ANIM_LOOP_FORWARD = 1, ++}; ++ + #endif diff --git a/patch/misc/0006-bootsplash.patch b/patch/misc/0006-bootsplash.patch new file mode 100644 index 000000000..d6c6db659 --- /dev/null +++ b/patch/misc/0006-bootsplash.patch @@ -0,0 +1,82 @@ +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 2ebaba16f785..416735ab6dc1 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -105,6 +105,7 @@ + #include + #include + #include ++#include + + #define MAX_NR_CON_DRIVER 16 + +@@ -4235,6 +4236,7 @@ void do_unblank_screen(int leaving_gfx) + } + + console_blanked = 0; ++ bootsplash_mark_dirty(); + if (vc->vc_sw->con_blank(vc, 0, leaving_gfx)) + /* Low-level driver cannot restore -> do it ourselves */ + update_screen(vc); +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index c8642142cfea..13fcaabbc2ca 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -165,6 +165,13 @@ bool bootsplash_would_render_now(void) + && bootsplash_is_enabled(); + } + ++void bootsplash_mark_dirty(void) ++{ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++} ++ + bool bootsplash_is_enabled(void) + { + bool was_enabled; +@@ -206,9 +213,7 @@ void bootsplash_enable(void) + + if (!was_enabled) { + /* Force a full redraw when the splash is re-activated */ +- mutex_lock(&splash_state.data_lock); +- splash_state.splash_fb = NULL; +- mutex_unlock(&splash_state.data_lock); ++ bootsplash_mark_dirty(); + + schedule_work(&splash_state.work_redraw_vc); + } +@@ -272,9 +277,7 @@ static int splash_resume(struct device *device) + * Force full redraw on 
resume since we've probably lost the + * framebuffer's contents meanwhile + */ +- mutex_lock(&splash_state.data_lock); +- splash_state.splash_fb = NULL; +- mutex_unlock(&splash_state.data_lock); ++ bootsplash_mark_dirty(); + + if (bootsplash_would_render_now()) + schedule_work(&splash_state.work_redraw_vc); +diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h +index c6dd0b43180d..4075098aaadd 100644 +--- a/include/linux/bootsplash.h ++++ b/include/linux/bootsplash.h +@@ -19,6 +19,8 @@ extern void bootsplash_render_full(struct fb_info *info); + + extern bool bootsplash_would_render_now(void); + ++extern void bootsplash_mark_dirty(void); ++ + extern bool bootsplash_is_enabled(void); + extern void bootsplash_disable(void); + extern void bootsplash_enable(void); +@@ -31,6 +33,8 @@ extern void bootsplash_init(void); + + #define bootsplash_would_render_now() (false) + ++#define bootsplash_mark_dirty() ++ + #define bootsplash_is_enabled() (false) + #define bootsplash_disable() + #define bootsplash_enable() diff --git a/patch/misc/0007-bootsplash.patch b/patch/misc/0007-bootsplash.patch new file mode 100644 index 000000000..e8cd47931 --- /dev/null +++ b/patch/misc/0007-bootsplash.patch @@ -0,0 +1,42 @@ +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index f4166263bb3a..a248429194bb 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -47,6 +47,8 @@ + + #include + ++#include ++ + extern void ctrl_alt_del(void); + + /* +@@ -1353,6 +1355,28 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) + } + #endif + ++ /* Trap keys when bootsplash is shown */ ++ if (bootsplash_would_render_now()) { ++ /* Deactivate bootsplash on ESC or Alt+Fxx VT switch */ ++ if (keycode >= KEY_F1 && keycode <= KEY_F12) { ++ bootsplash_disable(); ++ ++ /* ++ * No return here since we want to actually ++ * perform the VT switch. ++ */ ++ } else { ++ if (keycode == KEY_ESC) ++ bootsplash_disable(); ++ ++ /* ++ * Just drop any other keys. ++ * Their effect would be hidden by the splash. 
++ */ ++ return; ++ } ++ } ++ + if (kbd->kbdmode == VC_MEDIUMRAW) { + /* + * This is extended medium raw mode, with keys above 127 diff --git a/patch/misc/0008-bootsplash.patch b/patch/misc/0008-bootsplash.patch new file mode 100644 index 000000000..8a3b715ce --- /dev/null +++ b/patch/misc/0008-bootsplash.patch @@ -0,0 +1,21 @@ +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 3ffc1ce29023..bc6a24c9dfa8 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -104,6 +105,8 @@ static void sysrq_handle_SAK(int key) + { + struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; + schedule_work(SAK_work); ++ ++ bootsplash_disable(); + } + static struct sysrq_key_op sysrq_SAK_op = { + .handler = sysrq_handle_SAK, diff --git a/patch/misc/0009-bootsplash.patch b/patch/misc/0009-bootsplash.patch new file mode 100644 index 000000000..add68e7b2 --- /dev/null +++ b/patch/misc/0009-bootsplash.patch @@ -0,0 +1,21 @@ +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index 9a39a6fcfe98..8a9c67e1c5d8 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -1343,6 +1343,16 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + int y; + int c = scr_readw((u16 *) vc->vc_pos); + ++ /* ++ * Disable the splash here so we don't have to hook into ++ * vt_console_print() in drivers/tty/vt/vt.c ++ * ++ * We'd disable the splash just before the call to ++ * hide_cursor() anyway, so this spot is just fine. ++ */ ++ if (oops_in_progress) ++ bootsplash_disable(); ++ + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + + if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) diff --git a/patch/misc/0010-bootsplash.patch b/patch/misc/0010-bootsplash.patch new file mode 100644 index 000000000..e5c1fd0c8 --- /dev/null +++ b/patch/misc/0010-bootsplash.patch @@ -0,0 +1,321 @@ +diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash +new file mode 100644 +index 000000000000..742c7b035ded +--- /dev/null ++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash +@@ -0,0 +1,11 @@ ++What: /sys/devices/platform/bootsplash.0/enabled ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can be set and read. ++ ++ 0: Splash is disabled. ++ 1: Splash is shown whenever fbcon would show a text console ++ (i.e. no graphical application is running), and a splash ++ file is loaded. +diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst +new file mode 100644 +index 000000000000..611f0c558925 +--- /dev/null ++++ b/Documentation/bootsplash.rst +@@ -0,0 +1,285 @@ ++==================== ++The Linux bootsplash ++==================== ++ ++:Date: November, 2017 ++:Author: Max Staudt ++ ++ ++The Linux bootsplash is a graphical replacement for the '``quiet``' boot ++option, typically showing a logo and a spinner animation as the system starts. ++ ++Currently, it is a part of the Framebuffer Console support, and can be found ++as ``CONFIG_BOOTSPLASH`` in the kernel configuration. This means that as long ++as it is enabled, it hijacks fbcon's output and draws a splash screen instead. ++ ++Purely compiling in the bootsplash will not render it functional - to actually ++render a splash, you will also need a splash theme file. See the example ++utility and script in ``tools/bootsplash`` for a live demo. 
++ ++ ++ ++Motivation ++========== ++ ++- The '``quiet``' boot option only suppresses most messages during boot, but ++ errors are still shown. ++ ++- A user space implementation can only show a logo once user space has been ++ initialized far enough to allow this. A kernel splash can display a splash ++ immediately as soon as fbcon can be displayed. ++ ++- Implementing a splash screen in user space (e.g. Plymouth) is problematic ++ due to resource conflicts. ++ ++ For example, if Plymouth is keeping ``/dev/fb0`` (provided via vesafb/efifb) ++ open, then most DRM drivers can't replace it because the address space is ++ still busy - thus leading to a VRAM reservation error. ++ ++ See: https://bugzilla.opensuse.org/show_bug.cgi?id=980750 ++ ++ ++ ++Command line arguments ++====================== ++ ++``bootsplash.bootfile`` ++ Which file in the initramfs to load. ++ ++ The splash theme is loaded via request_firmware(), thus to load ++ ``/lib/firmware/bootsplash/mytheme`` pass the command line: ++ ++ ``bootsplash.bootfile=bootsplash/mytheme`` ++ ++ Note: The splash file *has to be* in the initramfs, as it needs to be ++ available when the splash is initialized early on. ++ ++ Default: none, i.e. a non-functional splash, falling back to showing text. ++ ++ ++ ++sysfs run-time configuration ++============================ ++ ++``/sys/devices/platform/bootsplash.0/enabled`` ++ Enable/disable the bootsplash. ++ The system boots with this set to 1, but will not show a splash unless ++ a splash theme file is also loaded. ++ ++ ++ ++Kconfig ++======= ++ ++``BOOTSPLASH`` ++ Whether to compile in bootsplash support ++ (depends on fbcon compiled in, i.e. ``FRAMEBUFFER_CONSOLE=y``) ++ ++ ++ ++Bootsplash file format ++====================== ++ ++A file specified in the kernel configuration as ``CONFIG_BOOTSPLASH_FILE`` ++or specified on the command line as ``bootsplash.bootfile`` will be loaded ++and displayed as soon as fbcon is initialized. ++ ++ ++Main blocks ++----------- ++ ++There are 3 main blocks in each file: ++ ++ - one File header ++ - n Picture headers ++ - m (Blob header + payload) blocks ++ ++ ++Structures ++---------- ++ ++The on-disk structures are defined in ++``drivers/video/fbdev/core/bootsplash_file.h`` and represent these blocks: ++ ++ - ``struct splash_file_header`` ++ ++ Represents the file header, with splash-wide information including: ++ ++ - The magic string "``Linux bootsplash``" on big-endian platforms ++ (the reverse on little endian) ++ - The file format version (for incompatible updates, hopefully never) ++ - The background color ++ - Number of picture and blob blocks ++ - Animation speed (we only allow one delay for all animations) ++ ++ The file header is followed by the first picture header. ++ ++ ++ - ``struct splash_picture_header`` ++ ++ Represents an object (picture) drawn on screen, including its immutable ++ properties: ++ - Width, height ++ - Positioning relative to screen corners or in the center ++ - Animation, if any ++ - Animation type ++ - Number of blobs ++ ++ The picture header is followed by another picture header, up until n ++ picture headers (as defined in the file header) have been read. Then, ++ the (blob header, payload) pairs follow. ++ ++ ++ - ``struct splash_blob_header`` ++ (followed by payload) ++ ++ Represents one raw data stream. So far, only picture data is defined. ++ ++ The blob header is followed by a payload, then padding to n*16 bytes, ++ then (if further blobs are defined in the file header) a further blob ++ header. 
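To make the block layout just described concrete, here is a userspace sketch (not part of the series; it assumes the uapi header from this series is installed as <linux/bootsplash_file.h> and it omits the bounds checking the kernel loader performs). Blob headers start right after the last picture header, and each payload is padded to the next multiple of 16 bytes, so a file with one picture and a single raw blob occupies sizeof(file header) + sizeof(picture header) + sizeof(blob header) + the pixel data rounded up to 16 bytes.

    /* Sketch: walk a splash file that has been read into memory. */
    #include <stdint.h>
    #include <stdio.h>
    #include <linux/bootsplash_file.h>

    static void walk_splash(const uint8_t *data)
    {
        const struct splash_file_header *fh = (const void *)data;
        const uint8_t *p = data + sizeof(*fh)
                           + fh->num_pics * sizeof(struct splash_pic_header);
        unsigned int i;

        for (i = 0; i < fh->num_blobs; i++) {
            const struct splash_blob_header *bh = (const void *)p;
            uint32_t padded = (bh->length + 15u) & ~15u;

            printf("blob %u: %u bytes, type %u, picture %u\n",
                   i, (unsigned)bh->length, (unsigned)bh->type,
                   (unsigned)bh->picture_id);

            p += sizeof(*bh) + padded;  /* payload is padded to 16 bytes */
        }
    }

This mirrors the "walker" loop in bootsplash_load_firmware(), which likewise advances by the blob length and rounds up to the next 16-byte boundary.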
++ ++ ++Alignment ++--------- ++ ++The bootsplash file is designed to be loaded into memory as-is. ++ ++All structures are a multiple of 16 bytes long, all elements therein are ++aligned to multiples of their length, and the payloads are always padded ++up to multiples of 16 bytes. This is to allow aligned accesses in all ++cases while still simply mapping the structures over an in-memory copy of ++the bootsplash file. ++ ++ ++Further information ++------------------- ++ ++Please see ``drivers/video/fbdev/core/bootsplash_file.h`` for further ++details and possible values in the file. ++ ++ ++ ++Hooks - how the bootsplash is integrated ++======================================== ++ ++``drivers/video/fbdev/core/fbcon.c`` ++ ``fbcon_init()`` calls ``bootsplash_init()``, which loads the default ++ bootsplash file or the one specified on the kernel command line. ++ ++ ``fbcon_switch()`` draws the bootsplash when it's active, and is also ++ one of the callers of ``set_blitting_type()``. ++ ++ ``set_blitting_type()`` calls ``fbcon_set_dummyops()`` when the ++ bootsplash is active, overriding the text rendering functions. ++ ++ ``fbcon_cursor()`` will call ``bootsplash_disable()`` when an oops is ++ being printed in order to make a kernel panic visible. ++ ++``drivers/video/fbdev/core/dummyblit.c`` ++ This contains the dummy text rendering functions used to suppress text ++ output while the bootsplash is shown. ++ ++``drivers/tty/vt/keyboard.c`` ++ ``kbd_keycode()`` can call ``bootsplash_disable()`` when the user ++ presses ESC or F1-F12 (changing VT). This is to provide a built-in way ++ of disabling the splash manually at any time. ++ ++ ++ ++FAQ: Frequently Asked Questions ++=============================== ++ ++I want to see the log! How do I show the log? ++--------------------------------------------- ++ ++Press ESC while the splash is shown, or remove the ``bootsplash.bootfile`` ++parameter from the kernel cmdline. Without that parameter, the bootsplash ++will boot disabled. ++ ++ ++Why use FB instead of modern DRM/KMS? ++------------------------------------- ++ ++This is a semantic problem: ++ - What memory to draw the splash to? ++ - And what mode will the screen be set to? ++ ++Using the fbdev emulation solves these issues. ++ ++Let's start from a bare KMS system, without fbcon, and without fbdev ++emulation. In this case, as long as userspace doesn't open the KMS ++device, the state of the screen is undefined. No framebuffer is ++allocated in video RAM, and no particular mode is set. ++ ++In this case, we'd have to allocate a framebuffer to show the splash, ++and set our mode ourselves. This either wastes a screenful of video RAM ++if the splash is to co-exist with the userspace program's own allocated ++framebuffer, or there is a flicker as we deactivate and delete the ++bootsplash's framebuffer and hand control over to userspace. Since we ++may set a different mode than userspace, we'd also have flicker due ++to mode switching. ++ ++This logic is already contained in every KMS driver that performs fbdev ++emulation. So we might as well use that. And the correct API to do so is ++fbdev. Plus, we get compatibility with old, pure fbdev drivers for free. ++With the fbdev emulation, there is *always* a well-defined framebuffer ++to draw on. And the selection of mode has already been done by the ++graphics driver, so we don't need to reinvent that wheel, either. ++Finally, if userspace decides to use /dev/fbX, we don't have to worry ++about wasting video RAM, either. 
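Tying back to the Alignment section above: the structure sizes implied by the field lists in bootsplash_file.h can be pinned down with compile-time checks. This is only an illustration, not something the series adds; the constants simply follow from adding up the packed fields (for instance 16 + 2 + 4 + 2 + 1 + 1 + 2 + 100 for the file header).

    /* Hedged sketch: compile-time check of the 16-byte-aligned struct sizes. */
    #include <linux/bootsplash_file.h>

    _Static_assert(sizeof(struct splash_file_header) == 128,
                   "splash_file_header must remain 128 bytes");
    _Static_assert(sizeof(struct splash_pic_header) == 32,
                   "splash_pic_header must remain 32 bytes");
    _Static_assert(sizeof(struct splash_blob_header) == 16,
                   "splash_blob_header must remain 16 bytes");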
++ ++ ++Why is the bootsplash integrated in fbcon? ++------------------------------------------ ++ ++Right now, the bootsplash is drawn from within fbcon, as this allows us ++to easily know *when* to draw - i.e. when we're safe from fbcon and ++userspace drawing all over our beautiful splash logo. ++ ++Separating them is not easy - see the to-do list below. ++ ++ ++ ++TO DO list for future development ++================================= ++ ++Second enable/disable switch for the system ++------------------------------------------- ++ ++It may be helpful to differentiate between the system and the user ++switching off the bootsplash. Thus, the system may make it disappear and ++reappear e.g. for a password prompt, yet once the user has pressed ESC, ++it could stay gone. ++ ++ ++Fix buggy DRM/KMS drivers ++------------------------- ++ ++Currently, the splash code manually checks for fbdev emulation provided by ++the ast, cirrus, and mgag200 DRM/KMS drivers. ++These drivers use a manual mechanism similar to deferred I/O for their FB ++emulation, and thus need to be manually flushed onto the screen in the same ++way. ++ ++This may be improved upon in several ways: ++ ++1. Changing these drivers to expose the fbdev BO's memory directly, like ++ bochsdrmfb does. ++2. Creating a new fb_ops->fb_flush() API to allow the kernel to flush the ++ framebuffer once the bootsplash has been drawn into it. ++ ++ ++Separating from fbcon ++--------------------- ++ ++Separating these two components would yield independence from fbcon being ++compiled into the kernel, and thus lowering code size in embedded ++applications. ++ ++To do this cleanly will involve a clean separation of users of an FB device ++within the kernel, i.e. fbcon, bootsplash, and userspace. Right now, the ++legacy fbcon code and VT code co-operate to switch between fbcon and ++userspace (by setting the VT into KD_GRAPHICS mode). Installing a muxer ++between these components ensues refactoring of old code and checking for ++correct locking. +diff --git a/MAINTAINERS b/MAINTAINERS +index 5c237445761e..7ffac272434e 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2709,6 +2709,8 @@ BOOTSPLASH + M: Max Staudt + L: linux-fbdev@vger.kernel.org + S: Maintained ++F: Documentation/ABI/testing/sysfs-platform-bootsplash ++F: Documentation/bootsplash.rst + F: drivers/video/fbdev/core/bootsplash*.* + F: drivers/video/fbdev/core/dummycon.c + F: include/linux/bootsplash.h diff --git a/patch/misc/0011-bootsplash.patch b/patch/misc/0011-bootsplash.patch new file mode 100644 index 000000000..8e87eb463 --- /dev/null +++ b/patch/misc/0011-bootsplash.patch @@ -0,0 +1,129 @@ +diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash +index 742c7b035ded..f8f4b259220e 100644 +--- a/Documentation/ABI/testing/sysfs-platform-bootsplash ++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash +@@ -9,3 +9,35 @@ Description: + 1: Splash is shown whenever fbcon would show a text console + (i.e. no graphical application is running), and a splash + file is loaded. ++ ++What: /sys/devices/platform/bootsplash.0/drop_splash ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can only be set. ++ ++ Any value written will cause the current splash theme file ++ to be unloaded and the text console to be redrawn. ++ ++What: /sys/devices/platform/bootsplash.0/load_file ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can only be set. 
++ ++ Any value written will cause the splash to be disabled and ++ internal memory structures to be freed. ++ ++ A firmware path written will cause a new theme file to be ++ loaded and the current bootsplash to be replaced. ++ The current enabled/disabled status is not touched. ++ If the splash is already active, it will be redrawn. ++ ++ The path has to be a path in /lib/firmware since ++ request_firmware() is used to fetch the data. ++ ++ When setting the splash from the shell, echo -n has to be ++ used as any trailing '\n' newline will be interpreted as ++ part of the path. +diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst +index 611f0c558925..b35aba5093e8 100644 +--- a/Documentation/bootsplash.rst ++++ b/Documentation/bootsplash.rst +@@ -67,6 +67,14 @@ sysfs run-time configuration + a splash theme file is also loaded. + + ++``/sys/devices/platform/bootsplash.0/drop_splash`` ++ Unload splash data and free memory. ++ ++``/sys/devices/platform/bootsplash.0/load_file`` ++ Load a splash file from ``/lib/firmware/``. ++ Note that trailing newlines will be interpreted as part of the file name. ++ ++ + + Kconfig + ======= +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 13fcaabbc2ca..16cb0493629d 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -251,11 +251,65 @@ static ssize_t splash_store_enabled(struct device *device, + return count; + } + ++static ssize_t splash_store_drop_splash(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct splash_file_priv *fp; ++ ++ if (!buf || !count || !splash_state.file) ++ return count; ++ ++ mutex_lock(&splash_state.data_lock); ++ fp = splash_state.file; ++ splash_state.file = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ ++ /* Redraw the text console */ ++ schedule_work(&splash_state.work_redraw_vc); ++ ++ bootsplash_free_file(fp); ++ ++ return count; ++} ++ ++static ssize_t splash_store_load_file(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct splash_file_priv *fp, *fp_old; ++ ++ if (!count) ++ return 0; ++ ++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev, ++ buf); ++ ++ if (!fp) ++ return -ENXIO; ++ ++ mutex_lock(&splash_state.data_lock); ++ fp_old = splash_state.file; ++ splash_state.splash_fb = NULL; ++ splash_state.file = fp; ++ mutex_unlock(&splash_state.data_lock); ++ ++ /* Update the splash or text console */ ++ schedule_work(&splash_state.work_redraw_vc); ++ ++ bootsplash_free_file(fp_old); ++ return count; ++} ++ + static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled); ++static DEVICE_ATTR(drop_splash, 0200, NULL, splash_store_drop_splash); ++static DEVICE_ATTR(load_file, 0200, NULL, splash_store_load_file); + + + static struct attribute *splash_dev_attrs[] = { + &dev_attr_enabled.attr, ++ &dev_attr_drop_splash.attr, ++ &dev_attr_load_file.attr, + NULL + }; + diff --git a/patch/misc/0012-bootsplash.patch b/patch/misc/0012-bootsplash.patch new file mode 100644 index 000000000..5d8ea1fe2 --- /dev/null +++ b/patch/misc/0012-bootsplash.patch @@ -0,0 +1,511 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index 7ffac272434e..ddff07cd794c 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2715,6 +2715,7 @@ F: drivers/video/fbdev/core/bootsplash*.* + F: drivers/video/fbdev/core/dummycon.c + F: include/linux/bootsplash.h + F: include/uapi/linux/bootsplash_file.h ++F: tools/bootsplash/* 
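The load_file and drop_splash attributes above make the splash switchable at runtime. Below is a minimal userspace sketch (not from the series) that loads a theme through sysfs. The theme path bootsplash/mytheme is just the example name used in the documentation and must exist under /lib/firmware/; as the ABI text warns, the path is written without a trailing newline, i.e. the C equivalent of echo -n.

    /* Sketch: load a splash theme at runtime via the sysfs attribute. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *theme = "bootsplash/mytheme";
        FILE *f = fopen("/sys/devices/platform/bootsplash.0/load_file", "w");

        if (!f)
            return 1;

        /* No trailing '\n' - it would be treated as part of the path. */
        fwrite(theme, 1, strlen(theme), f);
        fclose(f);
        return 0;
    }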
+ + BPF (Safe dynamic programs and tools) + M: Alexei Starovoitov +diff --git a/tools/bootsplash/.gitignore b/tools/bootsplash/.gitignore +new file mode 100644 +index 000000000000..091b99a17567 +--- /dev/null ++++ b/tools/bootsplash/.gitignore +@@ -0,0 +1 @@ ++bootsplash-packer +diff --git a/tools/bootsplash/Makefile b/tools/bootsplash/Makefile +new file mode 100644 +index 000000000000..0ad8e8a84942 +--- /dev/null ++++ b/tools/bootsplash/Makefile +@@ -0,0 +1,9 @@ ++CC := $(CROSS_COMPILE)gcc ++CFLAGS := -I../../usr/include ++ ++PROGS := bootsplash-packer ++ ++all: $(PROGS) ++ ++clean: ++ rm -fr $(PROGS) +diff --git a/tools/bootsplash/bootsplash-packer.c b/tools/bootsplash/bootsplash-packer.c +new file mode 100644 +index 000000000000..ffb6a8b69885 +--- /dev/null ++++ b/tools/bootsplash/bootsplash-packer.c +@@ -0,0 +1,471 @@ ++/* ++ * Kernel based bootsplash. ++ * ++ * (Splash file packer tool) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++ ++static void print_help(char *progname) ++{ ++ printf("Usage: %s [OPTIONS] outfile\n", progname); ++ printf("\n" ++ "Options, executed in order given:\n" ++ " -h, --help Print this help message\n" ++ "\n" ++ " --bg_red Background color (red part)\n" ++ " --bg_green Background color (green part)\n" ++ " --bg_blue Background color (blue part)\n" ++ " --bg_reserved (do not use)\n" ++ " --frame_ms Minimum milliseconds between animation steps\n" ++ "\n" ++ " --picture Start describing the next picture\n" ++ " --pic_width Picture width in pixels\n" ++ " --pic_height Picture height in pixels\n" ++ " --pic_position Coarse picture placement:\n" ++ " 0x00 - Top left\n" ++ " 0x01 - Top\n" ++ " 0x02 - Top right\n" ++ " 0x03 - Right\n" ++ " 0x04 - Bottom right\n" ++ " 0x05 - Bottom\n" ++ " 0x06 - Bottom left\n" ++ " 0x07 - Left\n" ++ "\n" ++ " Flags:\n" ++ " 0x10 - Calculate offset from corner towards center,\n" ++ " rather than from center towards corner\n" ++ " --pic_position_offset Distance from base position in pixels\n" ++ " --pic_anim_type Animation type:\n" ++ " 0 - None\n" ++ " 1 - Forward loop\n" ++ " --pic_anim_loop Loop point for animation\n" ++ "\n" ++ " --blob Include next data stream\n" ++ " --blob_type Type of data\n" ++ " --blob_picture_id Picture to associate this blob with, starting at 0\n" ++ " (default: number of last --picture)\n" ++ "\n"); ++ printf("This tool will write %s files.\n\n", ++#if __BYTE_ORDER == __BIG_ENDIAN ++ "Big Endian (BE)"); ++#elif __BYTE_ORDER == __LITTLE_ENDIAN ++ "Little Endian (LE)"); ++#else ++#error ++#endif ++} ++ ++ ++struct blob_entry { ++ struct blob_entry *next; ++ ++ char *fn; ++ ++ struct splash_blob_header header; ++}; ++ ++ ++static void dump_file_header(struct splash_file_header *h) ++{ ++ printf(" --- File header ---\n"); ++ printf("\n"); ++ printf(" version: %5u\n", h->version); ++ printf("\n"); ++ printf(" bg_red: %5u\n", h->bg_red); ++ printf(" bg_green: %5u\n", h->bg_green); ++ printf(" bg_blue: %5u\n", h->bg_blue); ++ printf(" bg_reserved: %5u\n", h->bg_reserved); ++ printf("\n"); ++ printf(" num_blobs: %5u\n", h->num_blobs); ++ printf(" num_pics: %5u\n", h->num_pics); ++ printf("\n"); ++ printf(" frame_ms: %5u\n", h->frame_ms); ++ printf("\n"); ++} ++ ++static void dump_pic_header(struct splash_pic_header *ph) ++{ ++ printf(" --- Picture header ---\n"); ++ printf("\n"); ++ printf(" width: %5u\n", ph->width); ++ printf(" height: %5u\n", ph->height); ++ printf("\n"); ++ printf(" 
num_blobs: %5u\n", ph->num_blobs); ++ printf("\n"); ++ printf(" position: %0x3x\n", ph->position); ++ printf(" position_offset: %5u\n", ph->position_offset); ++ printf("\n"); ++ printf(" anim_type: %5u\n", ph->anim_type); ++ printf(" anim_loop: %5u\n", ph->anim_loop); ++ printf("\n"); ++} ++ ++static void dump_blob(struct blob_entry *b) ++{ ++ printf(" --- Blob header ---\n"); ++ printf("\n"); ++ printf(" length: %7u\n", b->header.length); ++ printf(" type: %7u\n", b->header.type); ++ printf("\n"); ++ printf(" picture_id: %7u\n", b->header.picture_id); ++ printf("\n"); ++} ++ ++ ++#define OPT_MAX(var, max) \ ++ do { \ ++ if ((var) > max) { \ ++ fprintf(stderr, "--%s: Invalid value\n", \ ++ long_options[option_index].name); \ ++ break; \ ++ } \ ++ } while (0) ++ ++static struct option long_options[] = { ++ {"help", 0, 0, 'h'}, ++ {"bg_red", 1, 0, 10001}, ++ {"bg_green", 1, 0, 10002}, ++ {"bg_blue", 1, 0, 10003}, ++ {"bg_reserved", 1, 0, 10004}, ++ {"frame_ms", 1, 0, 10005}, ++ {"picture", 0, 0, 20000}, ++ {"pic_width", 1, 0, 20001}, ++ {"pic_height", 1, 0, 20002}, ++ {"pic_position", 1, 0, 20003}, ++ {"pic_position_offset", 1, 0, 20004}, ++ {"pic_anim_type", 1, 0, 20005}, ++ {"pic_anim_loop", 1, 0, 20006}, ++ {"blob", 1, 0, 30000}, ++ {"blob_type", 1, 0, 30001}, ++ {"blob_picture_id", 1, 0, 30002}, ++ {NULL, 0, NULL, 0} ++}; ++ ++ ++int main(int argc, char **argv) ++{ ++ FILE *of; ++ char *ofn; ++ int c; ++ int option_index = 0; ++ ++ unsigned long ul; ++ struct splash_file_header fh = {}; ++ struct splash_pic_header ph[255]; ++ struct blob_entry *blob_first = NULL; ++ struct blob_entry *blob_last = NULL; ++ struct blob_entry *blob_cur = NULL; ++ ++ if (argc < 2) { ++ print_help(argv[0]); ++ return EXIT_FAILURE; ++ } ++ ++ ++ /* Parse and and execute user commands */ ++ while ((c = getopt_long(argc, argv, "h", ++ long_options, &option_index)) != -1) { ++ switch (c) { ++ case 10001: /* bg_red */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_red = ul; ++ break; ++ case 10002: /* bg_green */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_green = ul; ++ break; ++ case 10003: /* bg_blue */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_blue = ul; ++ break; ++ case 10004: /* bg_reserved */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_reserved = ul; ++ break; ++ case 10005: /* frame_ms */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ fh.frame_ms = ul; ++ break; ++ ++ ++ case 20000: /* picture */ ++ if (fh.num_pics >= 255) { ++ fprintf(stderr, "--%s: Picture array full\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ fh.num_pics++; ++ break; ++ ++ case 20001: /* pic_width */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ ph[fh.num_pics - 1].width = ul; ++ break; ++ ++ case 20002: /* pic_height */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ ph[fh.num_pics - 1].height = ul; ++ break; ++ ++ case 20003: /* pic_position */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].position = ul; ++ break; ++ ++ case 20004: /* pic_position_offset */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].position_offset = ul; ++ break; ++ ++ case 20005: /* pic_anim_type */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].anim_type = ul; ++ break; ++ ++ case 20006: /* pic_anim_loop */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].anim_loop = ul; ++ break; ++ ++ ++ case 30000: /* 
++		case 30000:	/* blob */
++			if (fh.num_blobs >= 65535) {
++				fprintf(stderr, "--%s: Blob array full\n",
++					long_options[option_index].name);
++				break;
++			}
++
++			blob_cur = calloc(1, sizeof(struct blob_entry));
++			if (!blob_cur) {
++				fprintf(stderr, "--%s: Out of memory\n",
++					long_options[option_index].name);
++				break;
++			}
++
++			blob_cur->fn = optarg;
++			if (fh.num_pics)
++				blob_cur->header.picture_id = fh.num_pics - 1;
++
++			if (!blob_first)
++				blob_first = blob_cur;
++			if (blob_last)
++				blob_last->next = blob_cur;
++			blob_last = blob_cur;
++			fh.num_blobs++;
++			break;
++
++		case 30001:	/* blob_type */
++			if (!blob_cur) {
++				fprintf(stderr, "--%s: No blob selected\n",
++					long_options[option_index].name);
++				break;
++			}
++
++			ul = strtoul(optarg, NULL, 0);
++			OPT_MAX(ul, 255);
++			blob_cur->header.type = ul;
++			break;
++
++		case 30002:	/* blob_picture_id */
++			if (!blob_cur) {
++				fprintf(stderr, "--%s: No blob selected\n",
++					long_options[option_index].name);
++				break;
++			}
++
++			ul = strtoul(optarg, NULL, 0);
++			OPT_MAX(ul, 255);
++			blob_cur->header.picture_id = ul;
++			break;
++
++
++
++		case 'h':
++		case '?':
++		default:
++			print_help(argv[0]);
++			goto EXIT;
++		} /* switch (c) */
++	} /* while ((c = getopt_long(...)) != -1) */
++
++	/* Consume and drop lone arguments */
++	while (optind < argc) {
++		ofn = argv[optind];
++		optind++;
++	}
++
++
++	/* Read file lengths */
++	for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next) {
++		FILE *f;
++		long pos;
++		int i;
++
++		if (!blob_cur->fn)
++			continue;
++
++		f = fopen(blob_cur->fn, "rb");
++		if (!f)
++			goto ERR_FILE_LEN;
++
++		if (fseek(f, 0, SEEK_END))
++			goto ERR_FILE_LEN;
++
++		pos = ftell(f);
++		if (pos < 0 || pos > (1 << 30))
++			goto ERR_FILE_LEN;
++
++		blob_cur->header.length = pos;
++
++		fclose(f);
++		continue;
++
++ERR_FILE_LEN:
++		fprintf(stderr, "Error getting file length (or too long): %s\n",
++			blob_cur->fn);
++		if (f)
++			fclose(f);
++		continue;
++	}
++
++
++	/* Set magic headers */
++#if __BYTE_ORDER == __BIG_ENDIAN
++	memcpy(&fh.id[0], BOOTSPLASH_MAGIC_BE, 16);
++#elif __BYTE_ORDER == __LITTLE_ENDIAN
++	memcpy(&fh.id[0], BOOTSPLASH_MAGIC_LE, 16);
++#else
++#error
++#endif
++	fh.version = BOOTSPLASH_VERSION;
++
++	/* Set blob counts */
++	for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next) {
++		if (blob_cur->header.picture_id < fh.num_pics)
++			ph[blob_cur->header.picture_id].num_blobs++;
++	}
++
++
++	/* Dump structs */
++	dump_file_header(&fh);
++
++	for (ul = 0; ul < fh.num_pics; ul++)
++		dump_pic_header(&ph[ul]);
++
++	for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next)
++		dump_blob(blob_cur);
++
++
++	/* Write to file */
++	printf("Writing splash to file: %s\n", ofn);
++	of = fopen(ofn, "wb");
++	if (!of)
++		goto ERR_WRITING;
++
++	if (fwrite(&fh, sizeof(struct splash_file_header), 1, of) != 1)
++		goto ERR_WRITING;
++
++	for (ul = 0; ul < fh.num_pics; ul++) {
++		if (fwrite(&ph[ul], sizeof(struct splash_pic_header), 1, of)
++		    != 1)
++			goto ERR_WRITING;
++	}
++
++	blob_cur = blob_first;
++	while (blob_cur) {
++		struct blob_entry *blob_old = blob_cur;
++		FILE *f;
++		char buf[256];
++		uint32_t left;
++
++		if (fwrite(&blob_cur->header,
++			   sizeof(struct splash_blob_header), 1, of) != 1)
++			goto ERR_WRITING;
++
++		if (!blob_cur->header.length || !blob_cur->fn)
++			continue;
++
++		f = fopen(blob_cur->fn, "rb");
++		if (!f)
++			goto ERR_FILE_COPY;
++
++		left = blob_cur->header.length;
++		while (left >= sizeof(buf)) {
++			if (fread(buf, sizeof(buf), 1, f) != 1)
++				goto ERR_FILE_COPY;
++			if (fwrite(buf, sizeof(buf), 1, of) != 1)
++				goto ERR_FILE_COPY;
++			left -= sizeof(buf);
++		}
++		if (left) {
++			if (fread(buf, left, 1, f) != 1)
++				goto ERR_FILE_COPY;
++			if (fwrite(buf, left, 1, of) != 1)
++				goto ERR_FILE_COPY;
++		}
++
++		/* Pad data stream to 16 bytes */
++		if (left % 16) {
++			if (fwrite("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
++				   16 - (left % 16), 1, of) != 1)
++				goto ERR_FILE_COPY;
++		}
++
++		fclose(f);
++		blob_cur = blob_cur->next;
++		free(blob_old);
++		continue;
++
++ERR_FILE_COPY:
++		if (f)
++			fclose(f);
++		goto ERR_WRITING;
++	}
++
++	fclose(of);
++
++EXIT:
++	return EXIT_SUCCESS;
++
++
++ERR_WRITING:
++	fprintf(stderr, "Error writing splash.\n");
++	fprintf(stderr, "The output file is probably corrupt.\n");
++	if (of)
++		fclose(of);
++
++	while (blob_cur) {
++		struct blob_entry *blob_old = blob_cur;
++
++		blob_cur = blob_cur->next;
++		free(blob_old);
++	}
++
++	return EXIT_FAILURE;
++}
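For reference, a hypothetical invocation of the packer built by the Makefile above, derived only from the options documented in print_help(); the colour values, picture geometry and the logo.rgb blob file are illustrative placeholders, not values taken from this patch:

  ./bootsplash-packer --bg_red 0x10 --bg_green 0x10 --bg_blue 0x10 \
          --frame_ms 65535 \
          --picture --pic_width 300 --pic_height 100 --pic_position 0x05 \
          --blob logo.rgb \
          bootsplash

The positional outfile argument ("bootsplash" here) is the packed splash file the tool writes; each --blob is associated with the most recent --picture unless --blob_picture_id overrides it.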