Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/wwan/mhi_wwan_mbim.c - drop the extra arg.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 97c78d0af5
204 changed files with 1109 additions and 1029 deletions

@@ -24,10 +24,10 @@ allOf:
 select:
   properties:
     compatible:
-      items:
-        - enum:
+      contains:
+        enum:
           - sifive,fu540-c000-ccache
           - sifive,fu740-c000-ccache
 
 required:
   - compatible

MAINTAINERS | 21

@@ -3866,6 +3866,16 @@ L: bcm-kernel-feedback-list@broadcom.com
 S: Maintained
 F: drivers/mtd/nand/raw/brcmnand/
 
+BROADCOM STB PCIE DRIVER
+M: Jim Quinlan <jim2101024@gmail.com>
+M: Nicolas Saenz Julienne <nsaenz@kernel.org>
+M: Florian Fainelli <f.fainelli@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F: drivers/pci/controller/pcie-brcmstb.c
+
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: bcm-kernel-feedback-list@broadcom.com
@@ -4498,7 +4508,7 @@ L: clang-built-linux@googlegroups.com
 S: Supported
 W: https://clangbuiltlinux.github.io/
 B: https://github.com/ClangBuiltLinux/linux/issues
-C: irc://chat.freenode.net/clangbuiltlinux
+C: irc://irc.libera.chat/clangbuiltlinux
 F: Documentation/kbuild/llvm.rst
 F: include/linux/compiler-clang.h
 F: scripts/clang-tools/
@@ -6952,7 +6962,7 @@ F: include/uapi/linux/mdio.h
 F: include/uapi/linux/mii.h
 
 EXFAT FILE SYSTEM
-M: Namjae Jeon <namjae.jeon@samsung.com>
+M: Namjae Jeon <linkinjeon@kernel.org>
 M: Sungjong Seo <sj1557.seo@samsung.com>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
@@ -14457,6 +14467,13 @@ S: Maintained
 F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
 F: drivers/pci/controller/dwc/pcie-histb.c
 
+PCIE DRIVER FOR INTEL LGM GW SOC
+M: Rahul Tanwar <rtanwar@maxlinear.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+F: drivers/pci/controller/dwc/pcie-intel-gw.c
+
 PCIE DRIVER FOR MEDIATEK
 M: Ryder Lee <ryder.lee@mediatek.com>
 M: Jianjun Wang <jianjun.wang@mediatek.com>

Makefile | 2

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*

@@ -15,8 +15,6 @@ CONFIG_SLAB=y
 CONFIG_ARCH_NOMADIK=y
 CONFIG_MACH_NOMADIK_8815NHK=y
 CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_FSMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -97,6 +95,7 @@ CONFIG_REGULATOR=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_TPO_TPG110=y
 CONFIG_DRM_PL111=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_DES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set

@@ -218,30 +218,30 @@
 /*
  * PCI Control/Status Registers
  */
-#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
+#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
 
-#define PCI_NP_AD IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
-#define PCI_NP_CBE IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
-#define PCI_NP_WDATA IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
-#define PCI_NP_RDATA IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
-#define PCI_CRP_AD_CBE IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
-#define PCI_CRP_WDATA IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
-#define PCI_CRP_RDATA IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
-#define PCI_CSR IXP4XX_PCI_CSR(PCI_CSR_OFFSET)
-#define PCI_ISR IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
-#define PCI_INTEN IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
-#define PCI_DMACTRL IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
-#define PCI_AHBMEMBASE IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
-#define PCI_AHBIOBASE IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
-#define PCI_PCIMEMBASE IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
-#define PCI_AHBDOORBELL IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
-#define PCI_PCIDOORBELL IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
-#define PCI_ATPDMA0_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
-#define PCI_ATPDMA0_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
-#define PCI_ATPDMA0_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
-#define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
-#define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
-#define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
+#define PCI_NP_AD _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
+#define PCI_NP_CBE _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
+#define PCI_NP_WDATA _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
+#define PCI_NP_RDATA _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
+#define PCI_CRP_AD_CBE _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
+#define PCI_CRP_WDATA _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
+#define PCI_CRP_RDATA _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
+#define PCI_CSR _IXP4XX_PCI_CSR(PCI_CSR_OFFSET)
+#define PCI_ISR _IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
+#define PCI_INTEN _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
+#define PCI_DMACTRL _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
+#define PCI_AHBMEMBASE _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
+#define PCI_AHBIOBASE _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
+#define PCI_PCIMEMBASE _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
+#define PCI_AHBDOORBELL _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
+#define PCI_PCIDOORBELL _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
+#define PCI_ATPDMA0_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
+#define PCI_ATPDMA0_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
+#define PCI_ATPDMA0_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
+#define PCI_ATPDMA1_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+#define PCI_ATPDMA1_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+#define PCI_ATPDMA1_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
 
 /*
  * PCI register values and bit definitions

@@ -156,6 +156,7 @@ config ARM64
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+	select HAVE_ARCH_PFN_VALID
	select HAVE_ARCH_PREL32_RELOCATIONS
	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
	select HAVE_ARCH_SECCOMP_FILTER

@@ -183,6 +183,8 @@ endif
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
+	$(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
+	$(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
 
 ifeq ($(KBUILD_EXTMOD),)
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
@@ -9,6 +10,9 @@
 #include "pm8994.dtsi"
 #include "pmi8994.dtsi"
 
+/* cont_splash_mem has different memory mapping */
+/delete-node/ &cont_splash_mem;
+
 / {
	model = "LG Nexus 5X";
	compatible = "lg,bullhead", "qcom,msm8992";
@@ -17,6 +21,9 @@
	qcom,board-id = <0xb64 0>;
	qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
 
+	/* Bullhead firmware doesn't support PSCI */
+	/delete-node/ psci;
+
	aliases {
		serial0 = &blsp1_uart2;
	};
@@ -38,6 +45,11 @@
			ftrace-size = <0x10000>;
			pmsg-size = <0x20000>;
		};
+
+		cont_splash_mem: memory@3400000 {
+			reg = <0 0x03400000 0 0x1200000>;
+			no-map;
+		};
	};
 };
 

@@ -1,12 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, Huawei Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
 
 #include "msm8994.dtsi"
 
+/* Angler's firmware does not report where the memory is allocated */
+/delete-node/ &cont_splash_mem;
+
 / {
	model = "Huawei Nexus 6P";
	compatible = "huawei,angler", "qcom,msm8994";

@@ -200,7 +200,7 @@
					   &BIG_CPU_SLEEP_1
					   &CLUSTER_SLEEP_0>;
			next-level-cache = <&L2_700>;
-			qcom,freq-domain = <&cpufreq_hw 1>;
+			qcom,freq-domain = <&cpufreq_hw 2>;
			#cooling-cells = <2>;
			L2_700: l2-cache {
				compatible = "cache";

@@ -69,7 +69,7 @@
	};
	rmtfs_upper_guard: memory@f5d01000 {
		no-map;
-		reg = <0 0xf5d01000 0 0x2000>;
+		reg = <0 0xf5d01000 0 0x1000>;
	};
 
	/*
@@ -78,7 +78,7 @@
	 */
	removed_region: memory@88f00000 {
		no-map;
-		reg = <0 0x88f00000 0 0x200000>;
+		reg = <0 0x88f00000 0 0x1c00000>;
	};
 
	ramoops: ramoops@ac300000 {

@@ -700,7 +700,7 @@
		left_spkr: wsa8810-left{
			compatible = "sdw10217211000";
			reg = <0 3>;
-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
			#thermal-sensor-cells = <0>;
			sound-name-prefix = "SpkrLeft";
			#sound-dai-cells = <0>;
@@ -708,7 +708,7 @@
 
		right_spkr: wsa8810-right{
			compatible = "sdw10217211000";
-			powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
			reg = <0 4>;
			#thermal-sensor-cells = <0>;
			sound-name-prefix = "SpkrRight";

@@ -33,8 +33,7 @@
  * EL2.
  */
 .macro __init_el2_timers
-	mrs	x0, cnthctl_el2
-	orr	x0, x0, #3		// Enable EL1 physical timers
+	mov	x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr	// Clear virtual offset
 .endm

@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
 
 typedef struct page *pgtable_t;
 
+int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>

@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
	free_area_init(max_zone_pfns);
 }
 
+int pfn_valid(unsigned long pfn)
+{
+	phys_addr_t addr = PFN_PHYS(pfn);
+	struct mem_section *ms;
+
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
+		return 0;
+
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+
+	ms = __pfn_to_section(pfn);
+	if (!valid_section(ms))
+		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+
+	return memblock_is_memory(addr);
+}
+EXPORT_SYMBOL(pfn_valid);
+
 int pfn_is_map_memory(unsigned long pfn)
 {
	phys_addr_t addr = PFN_PHYS(pfn);

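Editorial note: the re-added pfn_valid() above is the generic gatekeeper for memmap lookups. A minimal sketch of the usual caller pattern, for illustration only (the helper name below is made up, not from this series):

#include <linux/mmzone.h>
#include <linux/pfn.h>

/* Hypothetical helper: only touch the memmap when the pfn is backed. */
static struct page *checked_pfn_to_page(unsigned long pfn)
{
	if (!pfn_valid(pfn))	/* holes and invalid sections: no struct page */
		return NULL;
	return pfn_to_page(pfn);
}
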
@@ -4,6 +4,8 @@
 
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
 
 #ifndef __ASSEMBLY__
 
@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
		return;
 
	update_user_segments(mfsr(0) | SR_NX);
+	/*
+	 * This isync() shouldn't be necessary as the kernel is not excepted to
+	 * run any instruction in userspace soon after the update of segments,
+	 * but hash based cores (at least G3) seem to exhibit a random
+	 * behaviour when the 'isync' is not there. 603 cores don't have this
+	 * behaviour so don't do the 'isync' as it saves several CPU cycles.
+	 */
+	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		isync();	/* Context sync required after mtsr() */
 }
 
 static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
		return;
 
	update_user_segments(mfsr(0) & ~SR_NX);
+	/*
+	 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
+	 * to return to userspace, but hash based cores (at least G3) seem to
+	 * exhibit a random behaviour when the 'isync' is not there. 603 cores
+	 * don't have this behaviour so don't do the 'isync' as it saves several
+	 * CPU cycles.
+	 */
+	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		isync();	/* Context sync required after mtsr() */
 }
 
 #ifdef CONFIG_PPC_KUAP

@@ -18,16 +18,12 @@
 /*
  * Updates the attributes of a page in three steps:
  *
- * 1. invalidate the page table entry
- * 2. flush the TLB
- * 3. install the new entry with the updated attributes
- *
- * Invalidating the pte means there are situations where this will not work
- * when in theory it should.
- * For example:
- * - removing write from page whilst it is being executed
- * - setting a page read-only whilst it is being read by another CPU
+ * 1. take the page_table_lock
+ * 2. install the new entry with the updated attributes
+ * 3. flush the TLB
  *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
  */
 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 
	spin_lock(&init_mm.page_table_lock);
 
-	/* invalidate the PTE so it's safe to modify */
-	pte = ptep_get_and_clear(&init_mm, addr, ptep);
-	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	pte = ptep_get(ptep);
 
	/* modify the PTE bits as desired, then apply */
	switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
		break;
	}
 
-	set_pte_at(&init_mm, addr, ptep, pte);
+	pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
 
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");
+
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
	spin_unlock(&init_mm.page_table_lock);
 
	return 0;

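Editorial note: for context, a sketch of the race the rewritten sequence avoids. This is an illustration, not part of the patch:

/*
 * Old sequence: the mapping briefly disappears, so a concurrent access
 * to that kernel page between the clear and the re-install faults.
 *
 *	CPU0: pte = ptep_get_and_clear(...)	CPU1: load from addr -> no PTE -> oops
 *	CPU0: flush_tlb_kernel_range(...)
 *	CPU0: set_pte_at(...)
 *
 * New sequence: the PTE is never cleared. The updated entry is installed
 * atomically with pte_update() under page_table_lock, and only then is
 * the TLB flushed, so other CPUs always see either the old or the new
 * valid entry.
 */
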
@@ -1170,7 +1170,7 @@ out:
	return ret;
 }
 
-static int __init xive_request_ipi(unsigned int cpu)
+static int xive_request_ipi(unsigned int cpu)
 {
	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
	int ret;

@@ -229,8 +229,8 @@ static void __init init_resources(void)
	}
 
	/* Clean-up any unused pre-allocated resources */
-	mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-	memblock_free(__pa(mem_res), mem_res_sz);
+	if (res_idx >= 0)
+		memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
	return;
 
 error:

@@ -560,9 +560,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
+	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;
 
+	/* The pdev has a reference to the zdev via its bus */
+	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;
 
@@ -582,7 +585,10 @@ int pcibios_add_device(struct pci_dev *pdev)
 
 void pcibios_release_device(struct pci_dev *pdev)
 {
+	struct zpci_dev *zdev = to_zpci(pdev);
+
	zpci_unmap_resources(pdev);
+	zpci_zdev_put(zdev);
 }
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)

@@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
	kref_put(&zdev->kref, zpci_release_device);
 }
 
+static inline void zpci_zdev_get(struct zpci_dev *zdev)
+{
+	kref_get(&zdev->kref);
+}
+
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,

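Editorial note: the zdev lifetime fix above is the standard kref pattern. A self-contained sketch under generic names, as an assumption of the shape rather than a copy of the s390 code:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
	/* ... payload ... */
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

/* Analogue of pcibios_add_device(): a new user pins the object. */
static void obj_attach_user(struct obj *o)
{
	kref_get(&o->kref);
}

/* Analogue of pcibios_release_device(): the last put frees via release. */
static void obj_detach_user(struct obj *o)
{
	kref_put(&o->kref, obj_release);
}
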
@@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
-	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);

@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+	return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
+	/*
+	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
+	 * implied in refcount_inc_not_zero() called from
+	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
+	 * and READ flush_rq->end_io
+	 */
+	smp_wmb();
+	refcount_set(&flush_rq->ref, 1);
 
	blk_flush_queue_rq(flush_rq, false);
 }

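Editorial note: a minimal sketch of the publish/consume ordering the new comment describes, with generic names (illustrative only; the pairing claim mirrors the patch's own comment about refcount_inc_not_zero()):

#include <linux/refcount.h>
#include <asm/barrier.h>

struct pub {
	void (*end_io)(void);	/* payload, written before publication */
	refcount_t ref;
};

static void publish(struct pub *p, void (*fn)(void))
{
	p->end_io = fn;			/* WRITE payload */
	smp_wmb();			/* order payload before the ref store */
	refcount_set(&p->ref, 1);	/* WRITE ref: publish the object */
}

static bool try_consume(struct pub *p)
{
	/* The barrier implied here pairs with the smp_wmb() above. */
	if (!refcount_inc_not_zero(&p->ref))
		return false;
	p->end_io();			/* READ payload: initialized by now */
	refcount_dec(&p->ref);
	return true;
}
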
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq, rq->mq_hctx))
+	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
	unsigned long *next = priv;
 
	/*
-	 * Just do a quick check if it is expired before locking the request in
-	 * so we're not unnecessarilly synchronizing across CPUs.
-	 */
-	if (!blk_mq_req_expired(rq, next))
-		return true;
-
-	/*
-	 * We have reason to believe the request may be expired. Take a
-	 * reference on the request to lock this request lifetime into its
-	 * currently allocated context to prevent it from being reallocated in
-	 * the event the completion by-passes this timeout handler.
-	 *
-	 * If the reference was already released, then the driver beat the
-	 * timeout handler to posting a natural completion.
-	 */
-	if (!refcount_inc_not_zero(&rq->ref))
-		return true;
-
-	/*
-	 * The request is now locked and cannot be reallocated underneath the
-	 * timeout handler's processing. Re-verify this exact request is truly
-	 * expired; if it is not expired, then the request was completed and
-	 * reallocated as a new request.
+	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+	 * be reallocated underneath the timeout handler's processing, then
+	 * the expire check is reliable. If the request is not expired, then
+	 * it was completed and reallocated as a new request after returning
+	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
 
-	blk_mq_put_rq_ref(rq);
	return true;
 }
 

@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
	kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-	return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);

@@ -292,6 +292,12 @@ void __init init_prmt(void)
	int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
					  sizeof (struct acpi_table_prmt_header),
					  0, acpi_parse_prmt, 0);
+	/*
+	 * Return immediately if PRMT table is not present or no PRM module found.
+	 */
+	if (mc <= 0)
+		return;
+
	pr_info("PRM: found %u modules\n", mc);
 
	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,

@@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
	if (lps0_dsm_func_mask_microsoft > 0) {
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void)
	if (lps0_dsm_func_mask_microsoft > 0) {
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);

@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
			  struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-			struct mhi_chan *mhi_chan, unsigned int flags);
+			struct mhi_chan *mhi_chan);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,

@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
 }
 
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-			struct mhi_chan *mhi_chan, unsigned int flags)
+			struct mhi_chan *mhi_chan)
 {
	int ret = 0;
	struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,9 +1455,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
	if (ret)
		goto error_pm_state;
 
-	if (mhi_chan->dir == DMA_FROM_DEVICE)
-		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
-
	/* Pre-allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1613,7 +1610,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 }
 
 /* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
 {
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1624,7 +1621,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
		if (!mhi_chan)
			continue;
 
-		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
+		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

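Editorial note: caller impact of the signature change, shown as a hypothetical MHI client (illustrative; only the dropped flags argument is taken from this diff):

/* Before this series a client passed channel flags explicitly:
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev, 0);
 *
 * With the argument dropped, the same call becomes:
 */
static int example_client_probe(struct mhi_device *mhi_dev)
{
	return mhi_prepare_for_transfer(mhi_dev);
}
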
@@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev)
		return error;
 
	error = sysc_check_active_timer(ddata);
-	if (error == -EBUSY)
+	if (error == -ENXIO)
		ddata->reserved = true;
+	else if (error)
+		return error;
 
	error = sysc_get_clocks(ddata);
	if (error)

@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
				hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
	}
 
-	imx_register_uart_clocks(1);
+	imx_register_uart_clocks(2);
 }
 CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);

@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
	if (on < 0)
		return on;
 
-	/*
-	 * Votable GDSCs can be ON due to Vote from other masters.
-	 * If a Votable GDSC is ON, make sure we have a Vote.
-	 */
-	if ((sc->flags & VOTABLE) && on)
+	if (on) {
+		/* The regulator must be on, sync the kernel state */
+		if (sc->rsupply) {
+			ret = regulator_enable(sc->rsupply);
+			if (ret < 0)
+				return ret;
+		}
+
+		/*
+		 * Votable GDSCs can be ON due to Vote from other masters.
+		 * If a Votable GDSC is ON, make sure we have a Vote.
+		 */
+		if (sc->flags & VOTABLE) {
+			ret = regmap_update_bits(sc->regmap, sc->gdscr,
+						 SW_COLLAPSE_MASK, val);
+			if (ret)
+				return ret;
+		}
+
+		/* Turn on HW trigger mode if supported */
+		if (sc->flags & HW_CTRL) {
+			ret = gdsc_hwctrl(sc, true);
+			if (ret < 0)
+				return ret;
+		}
+
+		/*
+		 * Make sure the retain bit is set if the GDSC is already on,
+		 * otherwise we end up turning off the GDSC and destroying all
+		 * the register contents that we thought we were saving.
+		 */
+		if (sc->flags & RETAIN_FF_ENABLE)
+			gdsc_retain_ff_on(sc);
+	} else if (sc->flags & ALWAYS_ON) {
+		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
		gdsc_enable(&sc->pd);
-
-	/*
-	 * Make sure the retain bit is set if the GDSC is already on, otherwise
-	 * we end up turning off the GDSC and destroying all the register
-	 * contents that we thought we were saving.
-	 */
-	if ((sc->flags & RETAIN_FF_ENABLE) && on)
-		gdsc_retain_ff_on(sc);
-
-	/* If ALWAYS_ON GDSCs are not ON, turn them ON */
-	if (sc->flags & ALWAYS_ON) {
-		if (!on)
-			gdsc_enable(&sc->pd);
		on = true;
-		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	}
 
	if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
	else
		gdsc_clear_mem_on(sc);
 
+	if (sc->flags & ALWAYS_ON)
+		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)

@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
 };
 
 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
-	{.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+	/*
+	 * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+	 * unstable because we do not know how to configure it properly.
+	 */
+	/* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
	{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
	{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
	{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },

@@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = {
	{ .compatible = "qcom,qcs404", },
	{ .compatible = "qcom,sc7180", },
	{ .compatible = "qcom,sc7280", },
+	{ .compatible = "qcom,sc8180x", },
	{ .compatible = "qcom,sdm845", },
+	{ .compatible = "qcom,sm8150", },
 
	{ .compatible = "st,stih407", },
	{ .compatible = "st,stih410", },

@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
	}
 
	if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
-		ret = -ENOMEM;
+		return -ENOMEM;
 
	/* Obtain CPUs that share SCMI performance controls */
	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);

@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
		 start + size - 1, nattr);
 
+	/* Flush pending deferred work to avoid racing with deferred actions from
+	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
+	 * can still race with get_attr because we don't hold the mmap lock. But that
+	 * would be a race condition in the application anyway, and undefined
+	 * behaviour is acceptable in that case.
+	 */
+	flush_work(&p->svms.deferred_list_work);
+
	mmap_read_lock(mm);
	if (!svm_range_is_valid(mm, start, size)) {
		pr_debug("invalid range\n");

@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
 }
+
+void dc_z10_save_init(struct dc *dc)
+{
+	if (dc->hwss.z10_save_init)
+		dc->hwss.z10_save_init(dc);
+}
 #endif
 /*
  * Applies given context to HW and copy it into current context.

@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
	 */
		memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
		dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		dc_z10_save_init(dc);
+#endif
	}
 
	return num_vmids;

@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
 #endif
 
 bool dc_enable_dmub_notifications(struct dc *dc);

@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
-static void calculate_wm_set_for_vlevel(
-		int vlevel,
-		struct wm_range_table_entry *table_entry,
-		struct dcn_watermarks *wm_set,
-		struct display_mode_lib *dml,
-		display_e2e_pipe_params_st *pipes,
-		int pipe_cnt)
-{
-	double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
-	ASSERT(vlevel < dml->soc.num_states);
-	/* only pipe 0 is read for voltage and dcf/soc clocks */
-	pipes[0].clks_cfg.voltage = vlevel;
-	pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
-	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
-	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
-	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
-	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
-	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
-	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
-	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
-	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
-	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
-	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
-		struct dc *dc, struct dc_state *context,
-		display_e2e_pipe_params_st *pipes,
-		int pipe_cnt,
-		int vlevel_req)
-{
-	int i, pipe_idx;
-	int vlevel, vlevel_max;
-	struct wm_range_table_entry *table_entry;
-	struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
-	ASSERT(bw_params);
-
-	vlevel_max = bw_params->clk_table.num_entries - 1;
-
-	/* WM Set D */
-	table_entry = &bw_params->wm_table.entries[WM_D];
-	if (table_entry->wm_type == WM_TYPE_RETRAINING)
-		vlevel = 0;
-	else
-		vlevel = vlevel_max;
-	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
-						&context->bw_ctx.dml, pipes, pipe_cnt);
-	/* WM Set C */
-	table_entry = &bw_params->wm_table.entries[WM_C];
-	vlevel = min(max(vlevel_req, 2), vlevel_max);
-	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
-						&context->bw_ctx.dml, pipes, pipe_cnt);
-	/* WM Set B */
-	table_entry = &bw_params->wm_table.entries[WM_B];
-	vlevel = min(max(vlevel_req, 1), vlevel_max);
-	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
-						&context->bw_ctx.dml, pipes, pipe_cnt);
-
-	/* WM Set A */
-	table_entry = &bw_params->wm_table.entries[WM_A];
-	vlevel = min(vlevel_req, vlevel_max);
-	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
-						&context->bw_ctx.dml, pipes, pipe_cnt);
-
-	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-		if (!context->res_ctx.pipe_ctx[i].stream)
-			continue;
-
-		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-		if (dc->config.forced_clocks) {
-			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-		}
-		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-		pipe_idx++;
-	}
-
-	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
 static struct resource_funcs dcn301_res_pool_funcs = {
	.destroy = dcn301_destroy_resource_pool,
	.link_enc_create = dcn301_link_encoder_create,
	.panel_cntl_create = dcn301_panel_cntl_create,
	.validate_bandwidth = dcn30_validate_bandwidth,
-	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,

@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
				&pipe_ctx->stream_res.encoder_info_frame);
	}
 }
+void dcn31_z10_save_init(struct dc *dc)
+{
+	union dmub_rb_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+	cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
 
 void dcn31_z10_restore(struct dc *dc)
 {

@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);

@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
	.set_pipe = dcn21_set_pipe,
	.z10_restore = dcn31_z10_restore,
+	.z10_save_init = dcn31_z10_save_init,
	.is_abm_supported = dcn31_is_abm_supported,
	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
	.update_visual_confirm_color = dcn20_update_visual_confirm_color,

@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
			int width, int height, int offset);
 
	void (*z10_restore)(struct dc *dc);
+	void (*z10_save_init)(struct dc *dc);
 
	void (*update_visual_confirm_color)(struct dc *dc,
			struct pipe_ctx *pipe_ctx,

@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
	 * DCN hardware restore.
	 */
	DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+	/**
+	 * DCN hardware save.
+	 */
+	DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
 };
 
 /**

@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
	return size;
 }
 
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
	struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
	}
 
 out:
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+	if (vega10_get_power_profile_mode_quirks(hwmgr))
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+						1 << power_profile_mode,
+						NULL);
+	else
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
						NULL);
 
	hwmgr->power_profile_mode = power_profile_mode;
 
	return 0;

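Editorial note: a worked example of the two workload-mask encodings (illustration; device id 0x6860 is the quirked one from the hunk above):

/*
 * power_profile_mode = 0:
 *	quirk path:	1 << 0			-> 0x1
 *	default path:	(!0) ? 0 : ...		-> 0x0
 *
 * power_profile_mode = 3:
 *	quirk path:	1 << 3			-> 0x8
 *	default path:	1 << (3 - 1)		-> 0x4
 */
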
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
	req.request.sequence = req32.request.sequence;
	req.request.signal = req32.request.signal;
	err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
-	if (err)
-		return err;
 
	req32.reply.type = req.reply.type;
	req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
	if (copy_to_user(argp, &req32, sizeof(req32)))
		return -EFAULT;
 
-	return 0;
+	return err;
 }
 
 #if defined(CONFIG_X86)

@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
	}
 }
 
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+	if (IS_ALDERLAKE_P(i915))
+		return BIT(PIPE_A) | BIT(PIPE_B);
+	else
+		return BIT(PIPE_A);
+}
+
 static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config)
 {
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
	if (!pipe_config->splitter.enable)
		return;
 
-	/* Splitter enable is supported for pipe A only. */
-	if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+	if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
		pipe_config->splitter.enable = false;
		return;
	}
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
		return;
 
	if (crtc_state->splitter.enable) {
-		/* Splitter enable is supported for pipe A only. */
-		if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
-			return;
-
		dss1 |= SPLITTER_ENABLE;
		dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
		if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
		dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
-		/* Splitter enable for eDP MSO is limited to certain pipes. */
-		if (dig_port->dp.mso_link_count) {
-			encoder->pipe_mask = BIT(PIPE_A);
-			if (IS_ALDERLAKE_P(dev_priv))
-				encoder->pipe_mask |= BIT(PIPE_B);
-		}
+		if (dig_port->dp.mso_link_count)
+			encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
	}
 
	/* In theory we don't need the encoder->type check, but leave it just in

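These four hunks replace three open-coded "pipe A only" checks with one helper, so the Alder Lake P exception lives in a single place. A standalone sketch of the helper's logic, parameterised on the platform check instead of the i915 device structure (plain C, no i915 headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum pipe { PIPE_A, PIPE_B };

    /* mirrors intel_ddi_splitter_pipe_mask(): ADL-P allows pipes A and B,
     * everything else is restricted to pipe A */
    static uint8_t splitter_pipe_mask(bool is_alderlake_p)
    {
        if (is_alderlake_p)
            return BIT(PIPE_A) | BIT(PIPE_B);
        return BIT(PIPE_A);
    }

    int main(void)
    {
        printf("adl-p mask %#x, others %#x\n",
               splitter_pipe_mask(true), splitter_pipe_mask(false));
        return 0;
    }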
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
-		/* Tweaked Wa_14010685332:icp,jsp,mcc */
-		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-			intel_de_rmw(i915, SOUTH_CHICKEN1,
-				     SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
+
+	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
 }
 
 void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
-		/* Tweaked Wa_14010685332:icp,jsp,mcc */
-		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-			intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
+
+	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 }
 
 void intel_display_power_suspend(struct drm_i915_private *i915)

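The workaround itself is a pair of read-modify-write operations on SOUTH_CHICKEN1: the suspend path sets SBCLK_RUN_REFCLK_DIS, the resume path clears it again. A standalone sketch of that rmw pattern (the bit position below is a placeholder, not the real register layout):

    #include <stdint.h>
    #include <stdio.h>

    #define SBCLK_RUN_REFCLK_DIS (1u << 7)   /* placeholder bit position */

    /* read-modify-write as done by intel_de_rmw(): clear `clear`, OR in `set` */
    static uint32_t rmw(uint32_t reg, uint32_t clear, uint32_t set)
    {
        return (reg & ~clear) | set;
    }

    int main(void)
    {
        uint32_t south_chicken1 = 0x1234;

        /* suspend path: set the bit (clear mask == set mask) */
        south_chicken1 = rmw(south_chicken1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
        printf("after suspend wa: %#x\n", south_chicken1);

        /* resume path: clear it again (set == 0) */
        south_chicken1 = rmw(south_chicken1, SBCLK_RUN_REFCLK_DIS, 0);
        printf("after resume wa:  %#x\n", south_chicken1);
        return 0;
    }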
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
	return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {

@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
-	/*
-	 * Wa_14010685332:cnp/cmp,tgp,adp
-	 * TODO: Clarify which platforms this applies to
-	 * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
-	 * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
-	 */
-	if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
-	    (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
-		intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
-				 SBCLK_RUN_REFCLK_DIS);
-		intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-	}
-}
-
 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
 {
	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
 
-	cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
-
-	cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)

@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 
 static int mtk_disp_color_remove(struct platform_device *pdev)
 {
+	component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
	return 0;
 }
 
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
 static int mtk_disp_ovl_remove(struct platform_device *pdev)
 {
+	component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
	return 0;
 }
 

@@ -34,6 +34,7 @@
 
 #define DISP_AAL_EN			0x0000
 #define DISP_AAL_SIZE			0x0030
+#define DISP_AAL_OUTPUT_SIZE		0x04d8
 
 #define DISP_DITHER_EN			0x0000
 #define DITHER_EN			BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
	struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
	mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+	mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
 }
 
 static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)

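The new DISP_AAL_OUTPUT_SIZE write uses the same packed size word as DISP_AAL_SIZE: width in the high 16 bits, height in the low 16. A trivial sketch of that packing (plain C, no MediaTek headers):

    #include <stdint.h>
    #include <stdio.h>

    /* width goes to bits 31:16, height to bits 15:0, as in mtk_aal_config() */
    static uint32_t aal_size_word(uint32_t w, uint32_t h)
    {
        return w << 16 | h;
    }

    int main(void)
    {
        printf("1920x1080 -> %#010x\n", aal_size_word(1920, 1080));
        return 0;
    }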
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
			interlock[NV50_DISP_INTERLOCK_CORE] = 0;
	}
 
+	/* Finish updating head(s)...
+	 *
+	 * NVD is rather picky about both where window assignments can change,
+	 * *and* about certain core and window channel states matching.
+	 *
+	 * The EFI GOP driver on newer GPUs configures window channels with a
+	 * different output format to what we do, and the core channel update
+	 * in the assign_windows case above would result in a state mismatch.
+	 *
+	 * Delay some of the head update until after that point to workaround
+	 * the issue.  This only affects the initial modeset.
+	 *
+	 * TODO: handle this better when adding flexible window mapping
+	 */
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+		struct nv50_head *head = nv50_head(crtc);
+
+		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+			  asyh->set.mask, asyh->clr.mask);
+
+		if (asyh->set.mask) {
+			nv50_head_flush_set_wndw(head, asyh);
+			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+		}
+	}
+
	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);

@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
 }
 
 void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	if (asyh->set.view   ) head->func->view    (head, asyh);
-	if (asyh->set.mode   ) head->func->mode    (head, asyh);
-	if (asyh->set.core   ) head->func->core_set(head, asyh);
	if (asyh->set.olut   ) {
		asyh->olut.offset = nv50_lut_load(&head->olut,
						  asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
						  asyh->olut.load);
		head->func->olut_set(head, asyh);
	}
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	if (asyh->set.view   ) head->func->view    (head, asyh);
+	if (asyh->set.mode   ) head->func->mode    (head, asyh);
+	if (asyh->set.core   ) head->func->core_set(head, asyh);
	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
	if (asyh->set.base   ) head->func->base    (head, asyh);
	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);

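The head.c change splits one flush routine in two: the OLUT load stays in the renamed nv50_head_flush_set_wndw(), which the new loop in the disp.c hunk above calls late in the commit, next to the window updates, while view/mode/core and the rest move into a fresh nv50_head_flush_set() that runs at the usual point. A minimal userspace sketch of that split, using a plain bitmask instead of the real nv50_head_atom (the flag names are made up for this sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define SET_VIEW (1u << 0)
    #define SET_MODE (1u << 1)
    #define SET_CORE (1u << 2)
    #define SET_OLUT (1u << 3)

    /* OLUT state is coupled to window-channel state, so it flushes late */
    static void flush_set_wndw(uint32_t set)
    {
        if (set & SET_OLUT)
            puts("load olut");
    }

    /* everything else keeps flushing at the usual point in the commit */
    static void flush_set(uint32_t set)
    {
        if (set & SET_VIEW) puts("view");
        if (set & SET_MODE) puts("mode");
        if (set & SET_CORE) puts("core");
    }

    int main(void)
    {
        uint32_t set = SET_MODE | SET_CORE | SET_OLUT;

        flush_set(set);        /* early */
        flush_set_wndw(set);   /* deferred until window assignment is done */
        return 0;
    }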
@@ -21,6 +21,7 @@ struct nv50_head {
 
 struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
 void nv50_head_flush_clr(struct nv50_head *head,
			 struct nv50_head_atom *asyh, bool flush);
 

@@ -4,7 +4,8 @@
 
 struct nv_device_v0 {
	__u8  version;
-	__u8  pad01[7];
+	__u8  priv;
+	__u8  pad02[6];
	__u64 device;	/* device identifier, ~0 for client default */
 };
 
@@ -61,8 +61,6 @@
 #define NV10_CHANNEL_DMA                     /* cl506b.h */ 0x0000006e
 #define NV17_CHANNEL_DMA                     /* cl506b.h */ 0x0000176e
 #define NV40_CHANNEL_DMA                     /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA                     /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA                      /* cl826e.h */ 0x0000826e
 
 #define NV50_CHANNEL_GPFIFO                  /* cl506f.h */ 0x0000506f
 #define G82_CHANNEL_GPFIFO                   /* cl826f.h */ 0x0000826f

@@ -9,7 +9,6 @@ struct nvif_client {
	const struct nvif_driver *driver;
	u64 version;
	u8 route;
-	bool super;
 };
 
 int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
@@ -11,7 +11,7 @@ struct nvif_driver {
	void (*fini)(void *priv);
	int (*suspend)(void *priv);
	int (*resume)(void *priv);
-	int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+	int (*ioctl)(void *priv, void *data, u32 size, void **hack);
	void __iomem *(*map)(void *priv, u64 handle, u32 size);
	void (*unmap)(void *priv, void __iomem *ptr, u32 size);
	bool keep;
@@ -13,7 +13,6 @@ struct nvkm_client {
	struct nvkm_client_notify *notify[32];
	struct rb_root objroot;
 
-	bool super;
	void *data;
	int (*ntfy)(const void *, u32, const void *, u32);
 
@@ -4,5 +4,5 @@
 #include <core/os.h>
 struct nvkm_client;
 
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
 #endif
@@ -15,7 +15,6 @@ struct nvkm_vma {
	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
-	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */

|
||||||
}
|
}
|
||||||
|
|
||||||
client->route = NVDRM_OBJECT_ABI16;
|
client->route = NVDRM_OBJECT_ABI16;
|
||||||
client->super = true;
|
|
||||||
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
|
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
|
||||||
NV_DMA_IN_MEMORY, &args, sizeof(args),
|
NV_DMA_IN_MEMORY, &args, sizeof(args),
|
||||||
&ntfy->object);
|
&ntfy->object);
|
||||||
client->super = false;
|
|
||||||
client->route = NVDRM_OBJECT_NVIF;
|
client->route = NVDRM_OBJECT_NVIF;
|
||||||
if (ret)
|
if (ret)
|
||||||
goto done;
|
goto done;
|
||||||
|
|
|
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		struct nouveau_cli *cli = (void *)chan->user.client;
-		bool super;
-
-		if (cli) {
-			super = cli->base.super;
-			cli->base.super = true;
-		}
 
		if (chan->fence)
			nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
		nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
-
-		if (cli)
-			cli->base.super = super;
	}
	*pchan = NULL;
 }
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    struct nouveau_channel **pchan)
 {
	struct nouveau_cli *cli = (void *)device->object.client;
-	bool super;
	int ret;
 
	/* hack until fencenv50 is fixed, and agp access relaxed */
-	super = cli->base.super;
-	cli->base.super = true;
-
	ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
	if (ret) {
		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, pchan);
		if (ret) {
			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
-			goto done;
+			return ret;
		}
	}
 
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
	if (ret) {
		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
-		goto done;
+		return ret;
	}
 
	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
	if (ret)
		nouveau_channel_del(pchan);
 
-done:
-	cli->base.super = super;
	return ret;
 }
 

@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
	ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
			       &(struct nv_device_v0) {
					.device = ~0,
+					.priv = true,
			       }, sizeof(struct nv_device_v0),
			       &cli->device);
	if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
	if (ret)
		goto done;
 
-	cli->base.super = false;
-
	fpriv->driver_priv = cli;
 
	mutex_lock(&drm->client.mutex);

|
||||||
struct gf100_vmm_map_v0 gf100;
|
struct gf100_vmm_map_v0 gf100;
|
||||||
} args;
|
} args;
|
||||||
u32 argc = 0;
|
u32 argc = 0;
|
||||||
bool super;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
switch (vmm->object.oclass) {
|
switch (vmm->object.oclass) {
|
||||||
case NVIF_CLASS_VMM_NV04:
|
case NVIF_CLASS_VMM_NV04:
|
||||||
|
@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
|
||||||
return -ENOSYS;
|
return -ENOSYS;
|
||||||
}
|
}
|
||||||
|
|
||||||
super = vmm->object.client->super;
|
return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
|
||||||
vmm->object.client->super = true;
|
|
||||||
ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
|
|
||||||
&mem->mem, 0);
|
|
||||||
vmm->object.client->super = super;
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
|
||||||
struct nouveau_drm *drm = cli->drm;
|
struct nouveau_drm *drm = cli->drm;
|
||||||
struct nvif_mmu *mmu = &cli->mmu;
|
struct nvif_mmu *mmu = &cli->mmu;
|
||||||
struct nvif_mem_ram_v0 args = {};
|
struct nvif_mem_ram_v0 args = {};
|
||||||
bool super = cli->base.super;
|
|
||||||
u8 type;
|
u8 type;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
|
||||||
args.dma = tt->dma_address;
|
args.dma = tt->dma_address;
|
||||||
|
|
||||||
mutex_lock(&drm->master.lock);
|
mutex_lock(&drm->master.lock);
|
||||||
cli->base.super = true;
|
|
||||||
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
|
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
|
||||||
reg->num_pages << PAGE_SHIFT,
|
reg->num_pages << PAGE_SHIFT,
|
||||||
&args, sizeof(args), &mem->mem);
|
&args, sizeof(args), &mem->mem);
|
||||||
cli->base.super = super;
|
|
||||||
mutex_unlock(&drm->master.lock);
|
mutex_unlock(&drm->master.lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
|
||||||
struct nouveau_cli *cli = mem->cli;
|
struct nouveau_cli *cli = mem->cli;
|
||||||
struct nouveau_drm *drm = cli->drm;
|
struct nouveau_drm *drm = cli->drm;
|
||||||
struct nvif_mmu *mmu = &cli->mmu;
|
struct nvif_mmu *mmu = &cli->mmu;
|
||||||
bool super = cli->base.super;
|
|
||||||
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
|
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
mutex_lock(&drm->master.lock);
|
mutex_lock(&drm->master.lock);
|
||||||
cli->base.super = true;
|
|
||||||
switch (cli->mem->oclass) {
|
switch (cli->mem->oclass) {
|
||||||
case NVIF_CLASS_MEM_GF100:
|
case NVIF_CLASS_MEM_GF100:
|
||||||
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
|
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
|
||||||
|
@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
|
||||||
WARN_ON(1);
|
WARN_ON(1);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
cli->base.super = super;
|
|
||||||
mutex_unlock(&drm->master.lock);
|
mutex_unlock(&drm->master.lock);
|
||||||
|
|
||||||
reg->start = mem->mem.addr >> PAGE_SHIFT;
|
reg->start = mem->mem.addr >> PAGE_SHIFT;
|
||||||
|
|
|
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
 }
 
 static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
 {
-	return nvkm_ioctl(priv, super, data, size, hack);
+	return nvkm_ioctl(priv, data, size, hack);
 }
 
 static int

@@ -237,14 +237,11 @@ void
 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 {
	if (limit > start) {
-		bool super = svmm->vmm->vmm.object.client->super;
-		svmm->vmm->vmm.object.client->super = true;
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
-		svmm->vmm->vmm.object.client->super = super;
	}
 }
 
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
		NVIF_VMM_PFNMAP_V0_A |
		NVIF_VMM_PFNMAP_V0_HOST;
 
-	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);
 
	unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 
	nouveau_hmm_convert_pfn(drm, &range, args);
 
-	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);
 
 out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
 
	mutex_lock(&svmm->mutex);
 
-	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
				npages * sizeof(args->p.phys[0]), NULL);
-	svmm->vmm->vmm.object.client->super = false;
 
	mutex_unlock(&svmm->mutex);
 }

@@ -32,6 +32,9 @@
 #include <nvif/event.h>
 #include <nvif/ioctl.h>
 
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
 struct usif_notify_p {
	struct drm_pending_event base;
	struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
 }
 
 static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
 {
	struct nouveau_cli *cli = nouveau_cli(f);
	struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
	struct usif_object *object;
	int ret = -ENOSYS;
 
+	if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+		return ret;
+
+	switch (args->v0.oclass) {
+	case NV_DMA_FROM_MEMORY:
+	case NV_DMA_TO_MEMORY:
+	case NV_DMA_IN_MEMORY:
+		return -EINVAL;
+	case NV_DEVICE: {
+		union {
+			struct nv_device_v0 v0;
+		} *args = data;
+
+		if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+			return ret;
+
+		args->v0.priv = false;
+		break;
+	}
+	default:
+		if (!parent_abi16)
+			return -EINVAL;
+		break;
+	}
+
	if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	list_add(&object->head, &cli->objects);
 
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-		object->route = args->v0.route;
-		object->token = args->v0.token;
-		args->v0.route = NVDRM_OBJECT_USIF;
-		args->v0.token = (unsigned long)(void *)object;
-		ret = nvif_client_ioctl(client, argv, argc);
-		args->v0.token = object->token;
-		args->v0.route = object->route;
+	object->route = args->v0.route;
+	object->token = args->v0.token;
+	args->v0.route = NVDRM_OBJECT_USIF;
+	args->v0.token = (unsigned long)(void *)object;
+	ret = nvif_client_ioctl(client, argv, argc);
+	if (ret) {
+		usif_object_dtor(object);
+		return ret;
	}
 
-	if (ret)
-		usif_object_dtor(object);
-	return ret;
+	args->v0.token = object->token;
+	args->v0.route = object->route;
+	return 0;
 }
 
 int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
		struct nvif_ioctl_v0 v0;
	} *argv = data;
	struct usif_object *object;
+	bool abi16 = false;
	u8 owner;
	int ret;
 
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
			mutex_unlock(&cli->mutex);
			goto done;
		}
+
+		abi16 = true;
	}
 
	switch (argv->v0.type) {
	case NVIF_IOCTL_V0_NEW:
-		ret = usif_object_new(filp, data, size, argv, argc);
+		ret = usif_object_new(filp, data, size, argv, argc, abi16);
		break;
	case NVIF_IOCTL_V0_NTFY_NEW:
		ret = usif_notify_new(filp, data, size, argv, argc);

|
||||||
int
|
int
|
||||||
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
|
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
|
||||||
{
|
{
|
||||||
return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
|
return client->driver->ioctl(client->object.priv, data, size, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
|
@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
|
||||||
client->object.client = client;
|
client->object.client = client;
|
||||||
client->object.handle = ~0;
|
client->object.handle = ~0;
|
||||||
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
|
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
|
||||||
client->super = true;
|
|
||||||
client->driver = parent->driver;
|
client->driver = parent->driver;
|
||||||
|
|
||||||
if (ret == 0) {
|
if (ret == 0) {
|
||||||
|
|
|
@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
|
||||||
} else
|
} else
|
||||||
return -ENOSYS;
|
return -ENOSYS;
|
||||||
|
|
||||||
return client->driver->ioctl(client->object.priv, client->super,
|
return client->driver->ioctl(client->object.priv, data, size, hack);
|
||||||
data, size, hack);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
|
|
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 }
 
 int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
-	   void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
 {
	struct nvkm_object *object = &client->object;
	union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
	} *args = data;
	int ret = -ENOSYS;
 
-	client->super = supervisor;
	nvif_ioctl(object, "size %d\n", size);
 
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {

@@ -2624,6 +2624,26 @@ nv174_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
 };
 
+static const struct nvkm_device_chip
+nv177_chipset = {
+	.name = "GA107",
+	.bar      = { 0x00000001, tu102_bar_new },
+	.bios     = { 0x00000001, nvkm_bios_new },
+	.devinit  = { 0x00000001, ga100_devinit_new },
+	.fb       = { 0x00000001, ga102_fb_new },
+	.gpio     = { 0x00000001, ga102_gpio_new },
+	.i2c      = { 0x00000001, gm200_i2c_new },
+	.imem     = { 0x00000001, nv50_instmem_new },
+	.mc       = { 0x00000001, ga100_mc_new },
+	.mmu      = { 0x00000001, tu102_mmu_new },
+	.pci      = { 0x00000001, gp100_pci_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer    = { 0x00000001, gk20a_timer_new },
+	.top      = { 0x00000001, ga100_top_new },
+	.disp     = { 0x00000001, ga102_disp_new },
+	.dma      = { 0x00000001, gv100_dma_new },
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
		       struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
	case 0x168: device->chip = &nv168_chipset; break;
	case 0x172: device->chip = &nv172_chipset; break;
	case 0x174: device->chip = &nv174_chipset; break;
+	case 0x177: device->chip = &nv177_chipset; break;
	default:
		if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
			switch (device->chipset) {

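The GA107 hook-up is the usual two-step pattern in this file: a per-chipset function table plus a new case in the chipset-id switch. A compact sketch of the dispatch (userspace C with made-up table contents):

    #include <stdio.h>

    struct chip { const char *name; };

    static const struct chip nv174_chipset = { "GA104" };
    static const struct chip nv177_chipset = { "GA107" };

    static const struct chip *lookup(unsigned int chipset)
    {
        switch (chipset) {
        case 0x174: return &nv174_chipset;
        case 0x177: return &nv177_chipset;   /* the newly added id */
        default:    return NULL;
        }
    }

    int main(void)
    {
        const struct chip *c = lookup(0x177);
        printf("%s\n", c ? c->name : "unsupported");
        return 0;
    }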
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		return ret;
 
	/* give priviledged clients register access */
-	if (client->super)
+	if (args->v0.priv)
		func = &nvkm_udevice_super;
	else
		func = &nvkm_udevice;
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
		return ret;
	}
 
-static void
+void
 nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
	struct nvkm_dp *dp = nvkm_dp(outp);
@@ -32,6 +32,7 @@ struct nvkm_dp {
 
 int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
		struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
 
 /* DPCD Receiver Capabilities */
 #define DPCD_RC00_DPCD_REV                                              0x00000

@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
+#include "dp.h"
 #include "ior.h"
 
 #include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
	if (!ior->arm.head || ior->arm.proto != proto) {
		OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
			 ior->arm.proto, proto);
+
+		/* The EFI GOP driver on Ampere can leave unused DP links routed,
+		 * which we don't expect.  The DisableLT IED script *should* get
+		 * us back to where we need to be.
+		 */
+		if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+			nvkm_dp_disable(outp, ior);
+
		return;
	}
 

@@ -26,7 +26,6 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/instmem.h>
 
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
	union {
		struct nv_dma_v0 v0;
	} *args = *pdata;
-	struct nvkm_device *device = dma->engine.subdev.device;
-	struct nvkm_client *client = oclass->client;
	struct nvkm_object *parent = oclass->parent;
-	struct nvkm_instmem *instmem = device->imem;
-	struct nvkm_fb *fb = device->fb;
	void *data = *pdata;
	u32 size = *psize;
	int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
		dmaobj->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_V0_TARGET_VRAM:
-		if (!client->super) {
-			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
-				return -EACCES;
-			if (device->card_type >= NV_50)
-				return -EACCES;
-		}
		dmaobj->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_V0_TARGET_PCI:
-		if (!client->super)
-			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_V0_TARGET_PCI_US:
	case NV_DMA_V0_TARGET_AGP:
-		if (!client->super)
-			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:

@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
 nvkm-y += nvkm/engine/fifo/dmanv10.o
 nvkm-y += nvkm/engine/fifo/dmanv17.o
 nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
 
 nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
		       const struct nvkm_oclass *, struct nv50_fifo_chan *);
 
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
 #endif

@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-		 void *data, u32 size, struct nvkm_object **pobject)
-{
-	struct nvkm_object *parent = oclass->parent;
-	union {
-		struct g82_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_fifo *fifo = nv50_fifo(base);
-	struct nv50_fifo_chan *chan;
-	int ret = -ENOSYS;
-
-	nvif_ioctl(parent, "create channel dma size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-				   "pushbuf %llx offset %016llx\n",
-			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
-			   args->v0.offset);
-		if (!args->v0.pushbuf)
-			return -EINVAL;
-	} else
-		return ret;
-
-	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-		return -ENOMEM;
-	*pobject = &chan->base.object;
-
-	ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-				 oclass, chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nvkm_kmap(chan->ramfc);
-	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				     (4 << 24) /* SEARCH_FULL */ |
-				     (chan->ramht->gpuobj->node->offset >> 4));
-	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
-	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
-	nvkm_done(chan->ramfc);
-	return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
-	.base.oclass = G82_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = g84_fifo_dma_new,
-};

@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-		  void *data, u32 size, struct nvkm_object **pobject)
-{
-	struct nvkm_object *parent = oclass->parent;
-	union {
-		struct nv50_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_fifo *fifo = nv50_fifo(base);
-	struct nv50_fifo_chan *chan;
-	int ret = -ENOSYS;
-
-	nvif_ioctl(parent, "create channel dma size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-				   "pushbuf %llx offset %016llx\n",
-			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
-			   args->v0.offset);
-		if (!args->v0.pushbuf)
-			return -EINVAL;
-	} else
-		return ret;
-
-	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-		return -ENOMEM;
-	*pobject = &chan->base.object;
-
-	ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-				  oclass, chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nvkm_kmap(chan->ramfc);
-	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				     (4 << 24) /* SEARCH_FULL */ |
-				     (chan->ramht->gpuobj->node->offset >> 4));
-	nvkm_done(chan->ramfc);
-	return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
-	.base.oclass = NV50_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_fifo_dma_new,
-};

@@ -119,7 +119,6 @@ g84_fifo = {
	.uevent_init = g84_fifo_uevent_init,
	.uevent_fini = g84_fifo_uevent_fini,
	.chan = {
-		&g84_fifo_dma_oclass,
		&g84_fifo_gpfifo_oclass,
		NULL
	},
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
			   "runlist %016llx priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
-		if (args->v0.priv && !oclass->client->super)
-			return -EINVAL;
		return gk104_fifo_gpfifo_new_(fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
			   "runlist %016llx priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
-		if (args->v0.priv && !oclass->client->super)
-			return -EINVAL;
		return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
			   "runlist %016llx priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
-		if (args->v0.priv && !oclass->client->super)
-			return -EINVAL;
		return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
@@ -136,7 +136,6 @@ nv50_fifo = {
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
-		&nv50_fifo_dma_oclass,
		&nv50_fifo_gpfifo_oclass,
		NULL
	},

@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
 
	object = nvkm_object_search(client, handle, &nvkm_umem);
	if (IS_ERR(object)) {
-		if (client->super && client != master) {
+		if (client != master) {
			spin_lock(&master->lock);
			list_for_each_entry(umem, &master->umem, head) {
				if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
		}
	} else {
		umem = nvkm_umem(object);
-		if (!umem->priv || client->super)
-			memory = nvkm_memory_ref(umem->memory);
+		memory = nvkm_memory_ref(umem->memory);
	}
 
	return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
	umem->mmu = mmu;
	umem->type = mmu->type[type].type;
-	umem->priv = oclass->client->super;
	INIT_LIST_HEAD(&umem->head);
	*pobject = &umem->object;
 
@@ -8,7 +8,6 @@ struct nvkm_umem {
	struct nvkm_object object;
	struct nvkm_mmu *mmu;
	u8 type:8;
-	bool priv:1;
	bool mappable:1;
	bool io:1;
 
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
 {
	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
 
-	if (mmu->func->mem.user.oclass && oclass->client->super) {
+	if (mmu->func->mem.user.oclass) {
		if (index-- == 0) {
			oclass->base = mmu->func->mem.user;
			oclass->ctor = nvkm_umem_new;

@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
|
||||||
static int
|
static int
|
||||||
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
||||||
{
|
{
|
||||||
struct nvkm_client *client = uvmm->object.client;
|
|
||||||
union {
|
union {
|
||||||
struct nvif_vmm_pfnclr_v0 v0;
|
struct nvif_vmm_pfnclr_v0 v0;
|
||||||
} *args = argv;
|
} *args = argv;
|
||||||
|
@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
||||||
} else
|
} else
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (!client->super)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
if (size) {
|
if (size) {
|
||||||
mutex_lock(&vmm->mutex);
|
mutex_lock(&vmm->mutex);
|
||||||
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
|
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
|
||||||
|
@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
||||||
static int
|
static int
|
||||||
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
|
||||||
{
|
{
|
||||||
struct nvkm_client *client = uvmm->object.client;
|
|
||||||
union {
|
union {
|
||||||
struct nvif_vmm_pfnmap_v0 v0;
|
struct nvif_vmm_pfnmap_v0 v0;
|
||||||
} *args = argv;
|
} *args = argv;
|
||||||
|
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 	} else
 		return ret;
 
-	if (!client->super)
-		return -ENOENT;
-
 	if (size) {
 		mutex_lock(&vmm->mutex);
 		ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-	struct nvkm_client *client = uvmm->object.client;
 	union {
 		struct nvif_vmm_unmap_v0 v0;
 	} *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		goto done;
 	}
 
-	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-			  vma->user, !client->super, vma->busy);
+	if (ret = -ENOENT, vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
 		goto done;
 	}
 
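The 'if (ret = -ENOENT, vma->busy)' lines in this and the following hunks lean on the C comma operator: the prospective error code is assigned first, and only the right-hand operand decides the branch. A self-contained demo of the idiom, with illustrative names:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	int ret = 0;
	int busy = 1;

	if (ret = -ENOENT, busy)        /* ret is set either way */
		printf("denied: ret=%d\n", ret);
	else
		printf("allowed, but ret is still %d\n", ret);
	return 0;
}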
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		goto fail;
 	}
 
-	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-			  vma->user, !client->super, vma->busy);
+	if (ret = -ENOENT, vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
 		goto fail;
 	}
 
@@ -230,7 +219,6 @@ fail:
 static int
 nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-	struct nvkm_client *client = uvmm->object.client;
 	union {
 		struct nvif_vmm_put_v0 v0;
 	} *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		goto done;
 	}
 
-	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-			  vma->user, !client->super, vma->busy);
+	if (ret = -ENOENT, vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
 		goto done;
 	}
 
@@ -268,7 +255,6 @@ done:
 static int
 nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-	struct nvkm_client *client = uvmm->object.client;
 	union {
 		struct nvif_vmm_get_v0 v0;
 	} *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		return ret;
 
 	args->v0.addr = vma->addr;
-	vma->user = !client->super;
 	return ret;
 }
 
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
 	new->refd = vma->refd;
 	new->used = vma->used;
 	new->part = vma->part;
-	new->user = vma->user;
 	new->busy = vma->busy;
 	new->mapped = vma->mapped;
 	list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
 static void
 nvkm_vma_dump(struct nvkm_vma *vma)
 {
-	printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+	printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
 	       vma->addr, (u64)vma->size,
 	       vma->used ? '-' : 'F',
 	       vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
 	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
 	       vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
 	       vma->part ? 'P' : '-',
-	       vma->user ? 'U' : '-',
 	       vma->busy ? 'B' : '-',
 	       vma->mapped ? 'M' : '-',
 	       vma->memory);
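nvkm_vma_dump above emits one character per flag, and turns a small page index into its ASCII digit with '0' + vma->page; dropping the user bit is why the format string loses one %c. A reduced, illustrative sketch of that dump style (the flag set here is not the kernel's full one):

#include <stdio.h>

#define PAGE_NONE 0xff

static void vma_dump_demo(int used, int part, int busy, unsigned page)
{
	printf("%c%c%c%c\n",
	       used ? '-' : 'F',        /* 'F' marks a free node */
	       part ? 'P' : '-',
	       busy ? 'B' : '-',
	       page != PAGE_NONE ? '0' + page : '-'); /* digit from index */
}

int main(void)
{
	vma_dump_demo(1, 0, 1, 2);         /* prints "--B2" */
	vma_dump_demo(0, 1, 0, PAGE_NONE); /* prints "FP--" */
	return 0;
}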
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
 	vma->mapref = true;
 	vma->sparse = false;
 	vma->used = true;
-	vma->user = true;
 	nvkm_vmm_node_insert(vmm, vma);
 	list_add_tail(&vma->head, &vmm->list);
 	return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	vma->page = NVKM_VMA_PAGE_NONE;
 	vma->refd = NVKM_VMA_PAGE_NONE;
 	vma->used = false;
-	vma->user = false;
 	nvkm_vmm_put_region(vmm, vma);
 }
 
@@ -534,15 +534,13 @@ int
 gp100_vmm_mthd(struct nvkm_vmm *vmm,
 	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
 {
-	if (client->super) {
-		switch (mthd) {
-		case GP100_VMM_VN_FAULT_REPLAY:
-			return gp100_vmm_fault_replay(vmm, argv, argc);
-		case GP100_VMM_VN_FAULT_CANCEL:
-			return gp100_vmm_fault_cancel(vmm, argv, argc);
-		default:
-			break;
-		}
+	switch (mthd) {
+	case GP100_VMM_VN_FAULT_REPLAY:
+		return gp100_vmm_fault_replay(vmm, argv, argc);
+	case GP100_VMM_VN_FAULT_CANCEL:
+		return gp100_vmm_fault_cancel(vmm, argv, argc);
+	default:
+		break;
 	}
 	return -EINVAL;
 }
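After this change, gp100_vmm_mthd dispatches purely on the method id, with -EINVAL for anything unrecognized. A standalone sketch of that dispatch shape; the ids and handlers below are stand-ins, not the real NVIF values:

#include <stdio.h>
#include <errno.h>

enum { VMM_FAULT_REPLAY = 0, VMM_FAULT_CANCEL = 1 };

static int fault_replay(void) { return 0; }
static int fault_cancel(void) { return 0; }

static int vmm_mthd_demo(unsigned mthd)
{
	switch (mthd) {
	case VMM_FAULT_REPLAY:
		return fault_replay();
	case VMM_FAULT_CANCEL:
		return fault_cancel();
	default:
		break;                  /* fall through to the error */
	}
	return -EINVAL;
}

int main(void)
{
	printf("replay -> %d\n", vmm_mthd_demo(VMM_FAULT_REPLAY));
	printf("bogus  -> %d\n", vmm_mthd_demo(42));
	return 0;
}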
@@ -78,9 +78,7 @@ static int ttm_global_init(void)
 
 	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
 	if (IS_ERR(ttm_debugfs_root)) {
-		ret = PTR_ERR(ttm_debugfs_root);
 		ttm_debugfs_root = NULL;
-		goto out;
 	}
 
 	/* Limit the number of pages in the pool to about 50% of the total
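This hunk stops treating a debugfs_create_dir() failure as fatal: the error pointer is simply replaced with NULL and init continues. The IS_ERR()/PTR_ERR() encoding it tests can be sketched in plain C, assuming the usual kernel convention of stashing small negative errnos in the top of the address space (this mirrors the kernel macros in spirit; it is not the kernel header):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *dir = err_ptr(-ENODEV);   /* pretend debugfs failed */

	if (is_err(dir)) {
		printf("failed: %ld, continuing without debugfs\n", ptr_err(dir));
		dir = NULL;             /* same recovery as the hunk above */
	}
	return 0;
}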
@@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
 	mr->uobject = uobj;
 	atomic_inc(&pd->usecnt);
 
+	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+	rdma_restrack_set_name(&mr->res, NULL);
+	rdma_restrack_add(&mr->res);
 	uobj->object = mr;
 
 	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
@@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 	if (nq)
 		nq->budget++;
 	atomic_inc(&rdev->srq_count);
+	spin_lock_init(&srq->lock);
 
 	return 0;
 
@@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	memset(&rattr, 0, sizeof(rattr));
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
-		rtnl_unlock();
 		ibdev_err(&rdev->ibdev,
 			  "Failed to register with netedev: %#x\n", rc);
 		return -EINVAL;
@@ -357,6 +357,7 @@ static int efa_enable_msix(struct efa_dev *dev)
 	}
 
 	if (irq_num != msix_vecs) {
+		efa_disable_msix(dev);
 		dev_err(&dev->pdev->dev,
 			"Allocated %d MSI-X (out of %d requested)\n",
 			irq_num, msix_vecs);
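The efa hunk adds efa_disable_msix() before reporting a short MSI-X allocation, so the partially granted vectors are released on the error path rather than leaked. A generic, runnable sketch of that release-before-fail pattern; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int alloc_vectors(int want, int **vecs)
{
	int got = want - 1;             /* simulate a short allocation */

	*vecs = calloc(got, sizeof(**vecs));
	return *vecs ? got : -ENOMEM;
}

static int enable_demo(int want)
{
	int *vecs;
	int got = alloc_vectors(want, &vecs);

	if (got < 0)
		return got;
	if (got != want) {
		free(vecs);             /* release the partial grant first */
		fprintf(stderr, "allocated %d (out of %d requested)\n",
			got, want);
		return -ENOSPC;
	}
	return 0;
}

int main(void)
{
	return enable_demo(4) ? 1 : 0;
}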
Some files were not shown because too many files have changed in this diff.