commit b2c16e1efd
Merge branch 'linus' into x86/asm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
136 changed files with 1225 additions and 836 deletions
@@ -10,7 +10,7 @@ Required properties:
   subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
   family).

-- clock-names: Should be "mmc".
+- clock-names: Should be "mmc" and "icn". (NB: The latter is not compulsory)
  See: Documentation/devicetree/bindings/resource-names.txt
 - clocks: Phandle to the clock.
  See: Documentation/devicetree/bindings/clock/clock-bindings.txt

@@ -1625,6 +1625,7 @@ N:	rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>
+R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained

@@ -6102,7 +6103,7 @@ S:	Supported
 F:	drivers/cpufreq/intel_pstate.c

 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:	Maik Broemme <mbroemme@plusserver.de>
+M:	Maik Broemme <mbroemme@libmpq.org>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/fb/intelfb.txt

@@ -12568,7 +12569,7 @@ F:	include/linux/if_*vlan.h
 F:	net/8021q/

 VLYNQ BUS
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c

Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep

 # *DOCUMENTATION*

@@ -2,6 +2,7 @@

 / {
 	memory {
+		device_type = "memory";
 		reg = <0 0x10000000>;
 	};

@@ -2,7 +2,6 @@
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"

 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
 	compatible = "brcm,bcm2835";
 	model = "BCM2835";
 	interrupt-parent = <&intc>;
+	#address-cells = <1>;
+	#size-cells = <1>;

 	chosen {
 		bootargs = "earlyprintk console=ttyAMA0";

@@ -550,8 +550,9 @@
 			interrupt-names = "mmcirq";
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_mmc0>;
-			clock-names = "mmc";
-			clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+			clock-names = "mmc", "icn";
+			clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
 			bus-width = <8>;
 			non-removable;
 		};
@@ -565,8 +566,9 @@
 			interrupt-names = "mmcirq";
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_sd1>;
-			clock-names = "mmc";
-			clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+			clock-names = "mmc", "icn";
+			clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
 			resets = <&softreset STIH407_MMC1_SOFTRESET>;
 			bus-width = <4>;
 		};

@@ -41,7 +41,8 @@
 			compatible = "st,st-ohci-300x";
 			reg = <0x9a03c00 0x100>;
 			interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
 			interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
 			compatible = "st,st-ohci-300x";
 			reg = <0x9a83c00 0x100>;
 			interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
 			interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
 			reset-names = "power", "softreset";

@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {

 static void locomo_handler(struct irq_desc *desc)
 {
-	struct locomo *lchip = irq_desc_get_chip_data(desc);
+	struct locomo *lchip = irq_desc_get_handler_data(desc);
 	int req, i;

 	/* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
 	 * Install handler for IRQ_LOCOMO_HW.
 	 */
 	irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_chip_data(lchip->irq, lchip);
-	irq_set_chained_handler(lchip->irq, locomo_handler);
+	irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);

 	/* Install handlers for IRQ_LOCOMO_* */
 	for ( ; irq <= lchip->irq_base + 3; irq++) {

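As background for the two locomo hunks: installing the handler data and the chained handler as separate calls leaves a window in which the handler can fire before its data exists. A minimal sketch of the combined-setter pattern; the two kernel API calls are real, everything named demo_* is illustrative:

    #include <linux/irq.h>

    struct demo_chip { int base; };	/* hypothetical driver state */

    static void demo_demux_handler(struct irq_desc *desc)
    {
    	/* Handler data was installed atomically with the handler,
    	 * so it can never be observed as NULL here. */
    	struct demo_chip *chip = irq_desc_get_handler_data(desc);

    	if (chip->base < 0)
    		return;
    	/* ... demultiplex secondary interrupts from chip->base on ... */
    }

    static void demo_setup(struct demo_chip *chip, unsigned int parent_irq)
    {
    	irq_set_chained_handler_and_data(parent_irq,
    					 demo_demux_handler, chip);
    }
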
@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
 	 * specifies that S0ReadyInt and S1ReadyInt should be '1'.
 	 */
 	sa1111_writel(0, irqbase + SA1111_INTPOL0);
-	sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
-		      SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+	sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+		      BIT(IRQ_S1_READY_NINT & 31),
 		      irqbase + SA1111_INTPOL1);

 	/* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 	if (sachip->irq != NO_IRQ) {
 		ret = sa1111_setup_irq(sachip, pd->irq_base);
 		if (ret)
-			goto err_unmap;
+			goto err_clk;
 	}

 #ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)

 	return 0;

+ err_clk:
+	clk_disable(sachip->clk);
 err_unmap:
 	iounmap(sachip->base);
 err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {

 #ifdef CONFIG_PM

-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
 {
-	struct sa1111 *sachip = platform_get_drvdata(dev);
+	struct sa1111 *sachip = dev_get_drvdata(dev);
 	struct sa1111_save_data *save;
 	unsigned long flags;
 	unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
  * restored by their respective drivers, and must be called
  * via LDM after this function.
  */
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
 {
-	struct sa1111 *sachip = platform_get_drvdata(dev);
+	struct sa1111 *sachip = dev_get_drvdata(dev);
 	struct sa1111_save_data *save;
 	unsigned long flags, id;
 	void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
 	id = sa1111_readl(sachip->base + SA1111_SKID);
 	if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
 		__sa1111_remove(sachip);
-		platform_set_drvdata(dev, NULL);
+		dev_set_drvdata(dev, NULL);
 		kfree(save);
 		return 0;
 	}
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
 	}

 #else
-#define sa1111_suspend NULL
-#define sa1111_resume  NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq  NULL
 #endif

 static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
 		return -EINVAL;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENXIO;
+		return irq;

 	return __sa1111_probe(&pdev->dev, mem, irq);
 }
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
 	return 0;
 }

+static struct dev_pm_ops sa1111_pm_ops = {
+	.suspend_noirq = sa1111_suspend_noirq,
+	.resume_noirq = sa1111_resume_noirq,
+};
+
 /*
  * Not sure if this should be on the system bus or not yet.
  * We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
 static struct platform_driver sa1111_device_driver = {
 	.probe		= sa1111_probe,
 	.remove		= sa1111_remove,
-	.suspend	= sa1111_suspend,
-	.resume		= sa1111_resume,
 	.driver		= {
 		.name	= "sa1111",
+		.pm	= &sa1111_pm_ops,
 	},
 };

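The sa1111 hunks above are the stock conversion from the legacy platform_driver .suspend/.resume pair to dev_pm_ops with noirq callbacks. A condensed sketch of the resulting shape, with a hypothetical demo driver standing in:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    struct demo_priv { u32 saved; };	/* hypothetical driver state */

    static int demo_suspend_noirq(struct device *dev)
    {
    	/* dev_pm_ops callbacks take a struct device, so drvdata comes
    	 * from dev_get_drvdata(), not platform_get_drvdata(). */
    	struct demo_priv *priv = dev_get_drvdata(dev);

    	priv->saved = 0;		/* ... save hardware state ... */
    	return 0;
    }

    static int demo_resume_noirq(struct device *dev)
    {
    	struct demo_priv *priv = dev_get_drvdata(dev);

    	(void)priv->saved;		/* ... restore hardware state ... */
    	return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
    	.suspend_noirq = demo_suspend_noirq,
    	.resume_noirq  = demo_resume_noirq,
    };

    static struct platform_driver demo_driver = {
    	.driver = {
    		.name = "demo",
    		.pm   = &demo_pm_ops,	/* replaces .suspend/.resume */
    	},
    };
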
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y

@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
 CONFIG_DMA_BCM2835=y
 CONFIG_DMA_OMAP=y
 CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
 CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y

@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 			err = blkcipher_walk_done(desc, &walk,
 						  walk.nbytes % AES_BLOCK_SIZE);
 		}
-		if (nbytes) {
+		if (walk.nbytes % AES_BLOCK_SIZE) {
 			u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
 			u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
 			u8 __aligned(8) tail[AES_BLOCK_SIZE];

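For this ctr_encrypt change (and its arm64 twin later in the diff): after blkcipher_walk_done() the authoritative length of a trailing partial block is walk.nbytes % AES_BLOCK_SIZE, not a byte count captured earlier in the loop. A userspace sketch of the tail handling, with a dummy keystream standing in for AES:

    #include <stdint.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Stand-in for one AES-CTR keystream block (illustrative only). */
    static void keystream_block(uint8_t out[AES_BLOCK_SIZE])
    {
    	memset(out, 0xAA, AES_BLOCK_SIZE);
    }

    /* Handle the final, possibly partial block: derive the tail length
     * from the walk's own byte count, not from a stale loop variable
     * (the bug the patch fixes). */
    static void ctr_tail(uint8_t *dst, const uint8_t *src, size_t walk_nbytes)
    {
    	size_t tail = walk_nbytes % AES_BLOCK_SIZE;
    	uint8_t ks[AES_BLOCK_SIZE];

    	if (!tail)
    		return;			/* nothing left over */
    	keystream_block(ks);
    	for (size_t i = 0; i < tail; i++)
    		dst[i] = src[i] ^ ks[i];
    }
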
@@ -47,6 +47,7 @@
 #define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2))

 /*

@@ -62,6 +62,7 @@
 #define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
 #define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
 #define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK	(_AT(pmdval_t, 7) << 2)

 /*
  * + Level 3 descriptor (PTE)

@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
 		return -ENOMEM;
 	}

+	/*
+	 * Clear the OF_POPULATED flag set in of_irq_init so that
+	 * later the Exynos PMU platform device won't be skipped.
+	 */
+	of_node_clear_flag(node, OF_POPULATED);
+
 	return 0;
 }

@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
 	// no D+ pullup; lubbock can't connect/disconnect in software
 };

+static void lubbock_init_pcmcia(void)
+{
+	struct clk *clk;
+
+	/* Add an alias for the SA1111 PCMCIA clock */
+	clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+	if (!IS_ERR(clk)) {
+		clkdev_create(clk, NULL, "1800");
+		clk_put(clk);
+	}
+}
+
 static struct resource sa1111_resources[] = {
 	[0] = {
 		.start	= 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
 	pxa_set_btuart_info(NULL);
 	pxa_set_stuart_info(NULL);

+	lubbock_init_pcmcia();
+
 	clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
 	pxa_set_udc_info(&udc_info);
 	pxa_set_fb_info(NULL, &sharp_lm8v31);

@@ -41,40 +41,27 @@

 #define REGULATOR_IRQ_MASK	BIT(2)	/* IRQ2, active low */

+/* start of DA9210 System Control and Event Registers */
+#define DA9210_REG_MASK_A		0x54
+
 static void __iomem *irqc;

-static const u8 da9063_mask_regs[] = {
-	DA9063_REG_IRQ_MASK_A,
-	DA9063_REG_IRQ_MASK_B,
-	DA9063_REG_IRQ_MASK_C,
-	DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
-#define DA9210_REG_MASK_A		0x54
-#define DA9210_REG_MASK_B		0x55
-
-static const u8 da9210_mask_regs[] = {
-	DA9210_REG_MASK_A,
-	DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
-			     unsigned int nregs)
-{
-	unsigned int i;
-
-	dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
-
-	for (i = 0; i < nregs; i++) {
-		int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
-		if (error) {
-			dev_err(&client->dev, "i2c error %d\n", error);
-			return;
-		}
-	}
-}
+/* first byte sets the memory pointer, following are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+	{
+		.addr = 0x58,
+		.len = ARRAY_SIZE(da9063_irq_clr),
+		.buf = da9063_irq_clr,
+	}, {
+		.addr = 0x68,
+		.len = ARRAY_SIZE(da9210_irq_clr),
+		.buf = da9210_irq_clr,
+	},
+};

 static int regulator_quirk_notify(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
 	client = to_i2c_client(dev);
 	dev_dbg(dev, "Detected %s\n", client->name);

-	if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
-		da9xxx_mask_irqs(client, da9063_mask_regs,
-				 ARRAY_SIZE(da9063_mask_regs));
-	else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
-		da9xxx_mask_irqs(client, da9210_mask_regs,
-				 ARRAY_SIZE(da9210_mask_regs));
+	if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+	    (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+		int ret;
+
+		dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+		ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+		if (ret != ARRAY_SIZE(da9xxx_msgs))
+			dev_err(&client->dev, "i2c error %d\n", ret);
+	}

 	mon = ioread32(irqc + IRQC_MONITOR);
 	if (mon & REGULATOR_IRQ_MASK)

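The rewritten quirk leans on the behaviour its own comment names: the first byte of a write sets the chips' register pointer, and each following byte lands in the next consecutive register, so a whole bank of IRQ mask registers is cleared in one i2c_transfer(). A sketch using the DA9210 values from the hunk (address 0x68, mask base 0x54); treat it as illustrative, not the driver's code:

    #include <linux/errno.h>
    #include <linux/i2c.h>
    #include <linux/kernel.h>

    /* First byte selects DA9210_REG_MASK_A (0x54); the following bytes
     * fill the consecutive mask registers with 0xff. */
    static u8 demo_clr[] = { 0x54, 0xff, 0xff };

    static int demo_mask_all(struct i2c_adapter *adap)
    {
    	struct i2c_msg msg = {
    		.addr = 0x68,		/* DA9210, per the hunk */
    		.len  = ARRAY_SIZE(demo_clr),
    		.buf  = demo_clr,
    	};

    	/* i2c_transfer() returns the number of messages completed. */
    	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
    }
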
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)

 	initial_pmd_value = pmd;

-	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+	pmd &= PMD_SECT_CACHE_MASK;

 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
 		if (cache_policies[i].pmd == pmd) {

@@ -255,10 +255,10 @@
 		/* Local timer */
 		timer {
 			compatible = "arm,armv8-timer";
-			interrupts = <1 13 0xf01>,
-				     <1 14 0xf01>,
-				     <1 11 0xf01>,
-				     <1 10 0xf01>;
+			interrupts = <1 13 0xf08>,
+				     <1 14 0xf08>,
+				     <1 11 0xf08>,
+				     <1 10 0xf08>;
 		};

 		timer0: timer0@ffc03000 {

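This 0xf01 -> 0xf08 substitution recurs through the arm64 timer nodes below. The third interrupt-specifier cell packs a GIC CPU mask into bits 15:8 and a trigger type into bits 3:0; the architected timer PPIs are level triggered, active low, so the old edge-rising encoding was wrong. A compile-time check of the decomposition, restating the macro definitions from the mainline dt-bindings headers:

    /* From include/dt-bindings/interrupt-controller/{irq.h,arm-gic.h} */
    #define IRQ_TYPE_EDGE_RISING	1
    #define IRQ_TYPE_LEVEL_LOW	8
    #define GIC_CPU_MASK_RAW(x)	((x) << 8)

    _Static_assert((GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW) == 0xf08,
    	       "0xf08 = CPU mask 0xf, level-low trigger");
    _Static_assert((GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_EDGE_RISING) == 0xf01,
    	       "0xf01 = CPU mask 0xf, edge-rising trigger");
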
@@ -102,13 +102,13 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 10
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
 	};

 	xtal: xtal-clk {

@@ -110,10 +110,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 0 0xff01>,	/* Secure Phys IRQ */
-			     <1 13 0xff01>,	/* Non-secure Phys IRQ */
-			     <1 14 0xff01>,	/* Virt IRQ */
-			     <1 15 0xff01>;	/* Hyp IRQ */
+		interrupts = <1 0 0xff08>,	/* Secure Phys IRQ */
+			     <1 13 0xff08>,	/* Non-secure Phys IRQ */
+			     <1 14 0xff08>,	/* Virt IRQ */
+			     <1 15 0xff08>;	/* Hyp IRQ */
 		clock-frequency = <50000000>;
 	};

arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi (new symbolic link)

@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi

@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"

 / {
 	compatible = "raspberrypi,3-model-b", "brcm,bcm2837";

@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"

 / {
 	compatible = "brcm,bcm2836";

arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi (new symbolic link)

@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi

arch/arm64/boot/dts/broadcom/bcm283x.dtsi (new symbolic link)

@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi

@@ -88,13 +88,13 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>;
+			      IRQ_TYPE_LEVEL_LOW)>;
 	};

 	pmu {

@@ -354,10 +354,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};

 	pmu {

@@ -473,10 +473,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
+		interrupts = <1 13 0xff08>,
+			     <1 14 0xff08>,
+			     <1 11 0xff08>,
+			     <1 10 0xff08>;
 	};

 	pmu_system_controller: system-controller@105c0000 {

@@ -119,10 +119,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x1>, /* Physical Secure PPI */
-			     <1 14 0x1>, /* Physical Non-Secure PPI */
-			     <1 11 0x1>, /* Virtual PPI */
-			     <1 10 0x1>; /* Hypervisor PPI */
+		interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+			     <1 14 0xf08>, /* Physical Non-Secure PPI */
+			     <1 11 0xf08>, /* Virtual PPI */
+			     <1 10 0xf08>; /* Hypervisor PPI */
 	};

 	pmu {

@@ -191,10 +191,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-			     <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-			     <1 11 0x8>, /* Virtual PPI, active-low */
-			     <1 10 0x8>; /* Hypervisor PPI, active-low */
+		interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+			     <1 14 4>, /* Physical Non-Secure PPI, active-low */
+			     <1 11 4>, /* Virtual PPI, active-low */
+			     <1 10 4>; /* Hypervisor PPI, active-low */
 	};

 	pmu {

@@ -122,10 +122,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 	};

 	odmi: odmi@300000 {

@@ -129,10 +129,10 @@

 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};

 	soc {

@@ -65,10 +65,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupt-parent = <&gic>;
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 0xf08>,
+			     <1 14 0xf08>,
+			     <1 11 0xf08>,
+			     <1 10 0xf08>;
 	};

 	amba_apu {

@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 			err = blkcipher_walk_done(desc, &walk,
 						  walk.nbytes % AES_BLOCK_SIZE);
 		}
-		if (nbytes) {
+		if (walk.nbytes % AES_BLOCK_SIZE) {
 			u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
 			u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
 			u8 __aligned(8) tail[AES_BLOCK_SIZE];

@@ -23,8 +23,8 @@
 	 */
 	.text
 	.align	1
-	.global	copy_from_user
-	.type	copy_from_user, @function
+	.global	___copy_from_user
+	.type	___copy_from_user, @function
 ___copy_from_user:
 	branch_if_kernel r8, __copy_user
 	ret_if_privileged r8, r11, r10, r10

@@ -276,7 +276,7 @@ copy_from_user(void *to, const void *from, unsigned long n)
 	unsigned long res = n;

 	if (likely(access_ok(VERIFY_READ, from, n)))
-		n = __copy_tofrom_user(to, from, n);
+		res = __copy_tofrom_user(to, from, n);
 	if (unlikely(res))
 		memset(to + (n - res), 0, res);
 	return res;

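This copy_from_user() fix matters because the memset offset is computed from both n and res: the uncopied byte count must land in res, not overwrite n, or the zeroed region is wrong. A userspace model of the fixed flow, where raw_copy() stands in for __copy_tofrom_user() and returns the bytes it could not copy:

    #include <stddef.h>
    #include <string.h>

    static size_t raw_copy(void *to, const void *from, size_t n)
    {
    	memcpy(to, from, n);
    	return 0;			/* pretend everything copied */
    }

    static size_t model_copy_from_user(void *to, const void *from, size_t n)
    {
    	size_t res = n;			/* assume nothing copied yet */

    	res = raw_copy(to, from, n);	/* the fix: update res, not n */
    	if (res)			/* zero exactly the uncopied tail */
    		memset((char *)to + (n - res), 0, res);
    	return res;			/* 0 on full success */
    }

With the old "n = raw_copy(...)" form, res stayed at the original length while n shrank, so the (n - res) offset and the zeroed size were both wrong.
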
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
 #include <linux/jump_label.h>

-#define NUM_CPU_FTR_KEYS	64
+#define NUM_CPU_FTR_KEYS	BITS_PER_LONG

 extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];

@@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
@@ -453,7 +453,7 @@ lwarx_loop2:
	 * At this stage
	 * cr2 - eq if first thread to wakeup in core
	 * cr3-  gt if waking up with partial/complete hypervisor state loss
-	 * cr4 - eq if waking up from complete hypervisor state loss.
+	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -481,7 +481,7 @@ first_thread_in_subcore:
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
-	bne	cr4,subcore_state_restored
+	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
@@ -526,7 +526,7 @@ timebase_resync:
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
-	bne	cr4,clear_lock
+	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and its waking up with
@@ -557,7 +557,7 @@ common_exit:
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
-	bne	cr4,hypervisor_state_restored
+	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

@@ -2217,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,

 	pnv_pci_link_table_and_group(phb->hose->node, num,
 			tbl, &pe->table_group);
-	pnv_pci_phb3_tce_invalidate_pe(pe);
+	pnv_pci_ioda2_tce_invalidate_pe(pe);

 	return 0;
 }
@@ -2355,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
 	if (ret)
 		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
 	else
-		pnv_pci_phb3_tce_invalidate_pe(pe);
+		pnv_pci_ioda2_tce_invalidate_pe(pe);

 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);

@@ -3426,7 +3426,17 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 		}
 	}

-	pnv_ioda_free_pe(pe);
+	/*
+	 * The PE for root bus can be removed because of hotplug in EEH
+	 * recovery for fenced PHB error. We need to mark the PE dead so
+	 * that it can be populated again in PCI hot add path. The PE
+	 * shouldn't be destroyed as it's the global reserved resource.
+	 */
+	if (phb->ioda.root_pe_populated &&
+	    phb->ioda.root_pe_idx == pe->pe_number)
+		phb->ioda.root_pe_populated = false;
+	else
+		pnv_ioda_free_pe(pe);
 }

 static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3442,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
 		return;

+	/*
+	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
+	 * isn't removed and added afterwards in this scenario. We should
+	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
+	 * device count is decreased on removing devices while failing to
+	 * be increased on adding devices. It leads to unbalanced PE's device
+	 * count and eventually make normal PCI hotplug path broken.
+	 */
 	pe = &phb->ioda.pe_array[pdn->pe_number];
+	pdn->pe_number = IODA_INVALID_PE;
+
 	WARN_ON(--pe->device_count < 0);
 	if (pe->device_count == 0)
 		pnv_ioda_release_pe(pe);

@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
-	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */

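The new 0x077d/0x077e values (mirrored in the KVM table further down as {0x7d, 0x07} and {0x7e, 0x07}) carry a unit mask alongside the event select: in this table's encoding the low byte is the event select and bits 15:8 the unit mask. A compile-time check, assuming that layout:

    #include <stdint.h>

    #define AMD_EVENT(sel, umask) ((uint64_t)(sel) | ((uint64_t)(umask) << 8))

    _Static_assert(AMD_EVENT(0x7d, 0x07) == 0x077d, "cache references");
    _Static_assert(AMD_EVENT(0x7e, 0x07) == 0x077e, "cache misses");
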
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
 * disabled state if called consecutively.
 *
 * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
 */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)

 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
-	else
-		intel_bts_disable_local();

 	intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
 			return;

 		intel_pmu_enable_bts(event->hw.config);
-	} else
-		intel_bts_enable_local();
+	}
 }

 static void intel_pmu_enable_all(int added)

@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	 */
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	intel_bts_disable_local();
 	__intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
 	if (cpuc->enabled)
 		__intel_pmu_enable_all(0, true);
+	intel_bts_enable_local();

 	/*
 	 * Only unmask the NMI after the overflow counters

@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
 	event->hw.addr_filters = NULL;
 }

+static inline bool valid_kernel_ip(unsigned long ip)
+{
+	return virt_addr_valid(ip) && kernel_ip(ip);
+}
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
 	struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)

 	list_for_each_entry(filter, filters, entry) {
 		/* PT doesn't support single address triggers */
-		if (!filter->range)
+		if (!filter->range || !filter->size)
 			return -EOPNOTSUPP;

-		if (!filter->inode && !kernel_ip(filter->offset))
-			return -EINVAL;
+		if (!filter->inode) {
+			if (!valid_kernel_ip(filter->offset))
+				return -EINVAL;
+
+			if (!valid_kernel_ip(filter->offset + filter->size))
+				return -EINVAL;
+		}

 		if (++range > pt_cap_get(PT_CAP_num_address_ranges))
 			return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
 		} else {
 			/* apply the offset */
 			msr_a = filter->offset + offs[range];
-			msr_b = filter->size + msr_a;
+			msr_b = filter->size + msr_a - 1;
 		}

 		filters->filter[range].msr_a = msr_a;

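The "- 1" in pt_event_addr_filters_sync() converts a half-open [offset, offset + size) filter into the inclusive upper bound the address-range registers take, which is also why a zero-size filter must now be rejected during validation. A sketch of the conversion:

    #include <stdint.h>

    /* A filter covers [offset, offset + size); the range registers want
     * an inclusive end, so the last covered byte is offset + size - 1.
     * size == 0 would wrap and must be rejected up front. */
    static inline void range_to_msrs(uint64_t offset, uint64_t size,
    				 uint64_t *msr_a, uint64_t *msr_b)
    {
    	*msr_a = offset;
    	*msr_b = offset + size - 1;	/* inclusive upper bound */
    }
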
@@ -433,7 +433,11 @@ do {									\
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
 		     "2:\n"						\
-		     _ASM_EXTABLE_EX(1b, 2b)				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
+		     "  jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE_EX(1b, 3b)				\
 		     : ltype(x) : "m" (__m(addr)))

 #define __put_user_nocheck(x, ptr, size)				\

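This uaccess hunk gives the _ex-style get_user a real fixup path: on a faulting load the exception table now lands on a xor that zeroes the destination register before resuming at label 2, instead of resuming with whatever stale value the register held. A userspace model of that semantic:

    #include <stddef.h>

    /* Model: on fault, the output is zeroed rather than left stale. */
    static void model_get_user_ex(unsigned long *out,
    			      const unsigned long *addr, int faulted)
    {
    	if (faulted) {
    		*out = 0;		/* what the new "3: xor" fixup does */
    		return;
    	}
    	*out = *addr;
    }
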
@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 {
 	bool new_val, old_val;
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
 	union kvm_ioapic_redirect_entry *e;

 	e = &ioapic->redirtbl[RTC_GSI];
@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 		return;

 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

 	if (new_val == old_val)
 		return;

 	if (new_val) {
-		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+		__set_bit(vcpu->vcpu_id, dest_map->map);
+		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
 		ioapic->rtc_status.pending_eoi++;
 	} else {
-		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+		__clear_bit(vcpu->vcpu_id, dest_map->map);
 		ioapic->rtc_status.pending_eoi--;
 		rtc_status_pending_eoi_check_valid(ioapic);
 	}

@@ -23,8 +23,8 @@
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-	[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
-	[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
+	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
 	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
 	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
 	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },

@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
 		return blkcipher_walk_done(desc, walk, -EINVAL);
 	}

+	bsize = min(walk->walk_blocksize, n);
+
 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
 			 BLKCIPHER_WALK_DIFF);
 	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
 		}
 	}

-	bsize = min(walk->walk_blocksize, n);
 	n = scatterwalk_clamp(&walk->in, n);
 	n = scatterwalk_clamp(&walk->out, n);

@@ -1,8 +1,8 @@
 /*
 * echainiv: Encrypted Chain IV Generator
 *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
 * the plain text. This algorithm requires that the block size be equal
 * to the IV size. It is mainly useful for CBC.
 *
@@ -24,81 +24,17 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>

-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-	u32 *a = (u32 *)dst;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		*a++ = this_cpu_read(*b);
-		b++;
-	}
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-	const u32 *a = (const u32 *)src;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		this_cpu_write(*b, *a);
-		a++;
-		b++;
-	}
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-	struct aead_request *subreq = aead_request_ctx(req);
-	struct crypto_aead *geniv;
-	unsigned int ivsize;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	ivsize = crypto_aead_ivsize(geniv);
-
-	echainiv_write_iv(subreq->iv, ivsize);
-
-	if (req->iv != subreq->iv)
-		memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-	if (req->iv != subreq->iv)
-		kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-				      int err)
-{
-	struct aead_request *req = base->data;
-
-	echainiv_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
-	crypto_completion_t compl;
-	void *data;
+	__be64 nseqno;
+	u64 seqno;
 	u8 *info;
 	unsigned int ivsize = crypto_aead_ivsize(geniv);
 	int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)

 	aead_request_set_tfm(subreq, ctx->child);

-	compl = echainiv_encrypt_complete;
-	data = req;
 	info = req->iv;

 	if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
 		return err;
 	}

-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		memcpy(info, req->iv, ivsize);
-	}
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
 	aead_request_set_crypt(subreq, req->dst, req->dst,
 			       req->cryptlen, info);
 	aead_request_set_ad(subreq, req->assoclen);

-	crypto_xor(info, ctx->salt, ivsize);
+	memcpy(&nseqno, info + ivsize - 8, 8);
+	seqno = be64_to_cpu(nseqno);
+	memset(info, 0, ivsize);
+
 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-	echainiv_read_iv(info, ivsize);

-	err = crypto_aead_encrypt(subreq);
-	echainiv_encrypt_complete2(req, err);
-	return err;
+	do {
+		u64 a;
+
+		memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+		a |= 1;
+		a *= seqno;
+
+		memcpy(info + ivsize - 8, &a, 8);
+	} while ((ivsize -= 8));
+
+	return crypto_aead_encrypt(subreq);
 }

 static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	alg = crypto_spawn_aead_alg(spawn);

 	err = -EINVAL;
-	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-	    inst->alg.ivsize > MAX_IV_SIZE)
+	if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
 		goto free_inst;

 	inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	inst->alg.init = aead_init_geniv;
 	inst->alg.exit = aead_exit_geniv;

-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

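The rewritten echainiv derives each 64-bit word of the IV as (salt_word | 1) * seqno; OR-ing in the low bit presumably keeps the multiplier odd, hence invertible modulo 2^64, so distinct sequence numbers cannot collide onto the same IV word. A userspace model of the loop from the hunk, assuming (as the new ivsize check enforces) an IV size that is a nonzero multiple of 8 bytes:

    #include <stdint.h>
    #include <string.h>

    static void derive_iv(uint8_t *iv, const uint8_t *salt,
    		      unsigned int ivsize, uint64_t seqno)
    {
    	memset(iv, 0, ivsize);
    	do {
    		uint64_t a;

    		/* walk the salt from its tail, 8 bytes at a time,
    		 * exactly as the hunk does with ctx->salt */
    		memcpy(&a, salt + ivsize - 8, sizeof(a));
    		a |= 1;			/* force an odd multiplier */
    		a *= seqno;
    		memcpy(iv + ivsize - 8, &a, sizeof(a));
    	} while ((ivsize -= 8));
    }
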
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	struct device *parent = NULL;
 	int retval;

-	trace_rpm_suspend(dev, rpmflags);
+	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
 	retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	}

 out:
-	trace_rpm_return_int(dev, _THIS_IP_, retval);
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

 	return retval;

@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }

-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
 	struct atmel_hlcdc_crtc_state *state;

@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 			u32 *coeff_tab = heo_upscaling_ycoef;
 			u32 max_memsize;

-			if (state->crtc_w < state->src_w)
+			if (state->crtc_h < state->src_h)
 				coeff_tab = heo_downscaling_ycoef;
 			for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
 				atmel_hlcdc_layer_update_cfg(&plane->layer,
 							     33 + i,
 							     0xffffffff,
 							     coeff_tab[i]);
-			factor = ((8 * 256 * state->src_w) - (256 * 4)) /
-				 state->crtc_w;
+			factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+				 state->crtc_h;
 			factor++;
-			max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+			max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
 				      2048;
-			if (max_memsize > state->src_w)
+			if (max_memsize > state->src_h)
 				factor--;
 			factor_reg |= (factor << 16) | 0x80000000;
 		}

@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
    return 0;
}

+ #if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_mode_fb_cmd232 {
    u32 fb_id;
    u32 width;

@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,

    return 0;
}
+ #endif

static drm_ioctl_compat_t *drm_compat_ioctls[] = {
    [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,

@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
    [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
    [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ #if defined(CONFIG_X86) || defined(CONFIG_IA64)
    [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+ #endif
};

/**

@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
    flags = exynos_gem->flags;

    /*
-      * without iommu support, not support physically non-continuous memory
-      * for framebuffer.
+      * Physically non-contiguous memory type for framebuffer is not
+      * supported without IOMMU.
     */
    if (IS_NONCONTIG_BUFFER(flags)) {
-         DRM_ERROR("cannot use this gem memory type for fb.\n");
+         DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
        return -EINVAL;
    }

@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
    return 0;
}

- #ifdef CONFIG_PM_SLEEP
- static int fimc_suspend(struct device *dev)
- {
-     struct fimc_context *ctx = get_fimc_context(dev);
-
-     DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-     if (pm_runtime_suspended(dev))
-         return 0;
-
-     return fimc_clk_ctrl(ctx, false);
- }
-
- static int fimc_resume(struct device *dev)
- {
-     struct fimc_context *ctx = get_fimc_context(dev);
-
-     DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-     if (!pm_runtime_suspended(dev))
-         return fimc_clk_ctrl(ctx, true);
-
-     return 0;
- }
- #endif
-
static int fimc_runtime_suspend(struct device *dev)
{
    struct fimc_context *ctx = get_fimc_context(dev);

@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
#endif

static const struct dev_pm_ops fimc_pm_ops = {
-     SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+     SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                 pm_runtime_force_resume)
    SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};

@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
    return 0;
}

- #ifdef CONFIG_PM_SLEEP
- static int g2d_suspend(struct device *dev)
+ #ifdef CONFIG_PM
+ static int g2d_runtime_suspend(struct device *dev)
{
    struct g2d_data *g2d = dev_get_drvdata(dev);

@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev)

    flush_work(&g2d->runqueue_work);

-     return 0;
- }
-
- static int g2d_resume(struct device *dev)
- {
-     struct g2d_data *g2d = dev_get_drvdata(dev);
-
-     g2d->suspended = false;
-     g2d_exec_runqueue(g2d);
-
-     return 0;
- }
- #endif
-
- #ifdef CONFIG_PM
- static int g2d_runtime_suspend(struct device *dev)
- {
-     struct g2d_data *g2d = dev_get_drvdata(dev);
-
    clk_disable_unprepare(g2d->gate_clk);

    return 0;

@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
    if (ret < 0)
        dev_warn(dev, "failed to enable clock.\n");

+     g2d->suspended = false;
+     g2d_exec_runqueue(g2d);
+
    return ret;
}
#endif

static const struct dev_pm_ops g2d_pm_ops = {
-     SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+     SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                 pm_runtime_force_resume)
    SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};

@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
    return 0;
}

- #ifdef CONFIG_PM_SLEEP
- static int gsc_suspend(struct device *dev)
- {
-     struct gsc_context *ctx = get_gsc_context(dev);
-
-     DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-     if (pm_runtime_suspended(dev))
-         return 0;
-
-     return gsc_clk_ctrl(ctx, false);
- }
-
- static int gsc_resume(struct device *dev)
- {
-     struct gsc_context *ctx = get_gsc_context(dev);
-
-     DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-     if (!pm_runtime_suspended(dev))
-         return gsc_clk_ctrl(ctx, true);
-
-     return 0;
- }
- #endif
-
- #ifdef CONFIG_PM
- static int gsc_runtime_suspend(struct device *dev)
+ static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
    struct gsc_context *ctx = get_gsc_context(dev);

@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
    return gsc_clk_ctrl(ctx, false);
}

- static int gsc_runtime_resume(struct device *dev)
+ static int __maybe_unused gsc_runtime_resume(struct device *dev)
{
    struct gsc_context *ctx = get_gsc_context(dev);

@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)

    return gsc_clk_ctrl(ctx, true);
}
- #endif

static const struct dev_pm_ops gsc_pm_ops = {
-     SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+     SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                 pm_runtime_force_resume)
    SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};

@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
    return 0;
}

-
- #ifdef CONFIG_PM_SLEEP
- static int rotator_suspend(struct device *dev)
- {
-     struct rot_context *rot = dev_get_drvdata(dev);
-
-     if (pm_runtime_suspended(dev))
-         return 0;
-
-     return rotator_clk_crtl(rot, false);
- }
-
- static int rotator_resume(struct device *dev)
- {
-     struct rot_context *rot = dev_get_drvdata(dev);
-
-     if (!pm_runtime_suspended(dev))
-         return rotator_clk_crtl(rot, true);
-
-     return 0;
- }
- #endif
-
static int rotator_runtime_suspend(struct device *dev)
{
    struct rot_context *rot = dev_get_drvdata(dev);

@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
#endif

static const struct dev_pm_ops rotator_pm_ops = {
-     SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+     SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                 pm_runtime_force_resume)
    SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
               NULL)
};

@@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)

    intel_runtime_pm_enable(dev_priv);

+     /* Everything is in place, we can now relax! */
+     DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+          driver.name, driver.major, driver.minor, driver.patchlevel,
+          driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
    intel_runtime_pm_put(dev_priv);

    return 0;

@@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
    has_full_48bit_ppgtt =
            IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;

-     if (intel_vgpu_active(dev_priv))
-         has_full_ppgtt = false; /* emulation is too hard */
+     if (intel_vgpu_active(dev_priv)) {
+         /* emulation is too hard */
+         has_full_ppgtt = false;
+         has_full_48bit_ppgtt = false;
+     }

    if (!has_aliasing_ppgtt)
        return 0;

@@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
        return 0;
    }

-     if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+     if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
        return has_full_48bit_ppgtt ? 3 : 2;
    else
        return has_aliasing_ppgtt ? 1 : 0;

@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)

    BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

-     if (!IS_HASWELL(dev_priv))
-         return;
-
    magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
    if (magic != VGT_MAGIC)
        return;

@@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
        return;
    }

-     drm_encoder_cleanup(&intel_encoder->base);
    kfree(intel_dvo);
    kfree(intel_connector);
}

@@ -1047,6 +1047,23 @@ err_out:
    return err;
}

+ static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+ {
+     DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+     return 1;
+ }
+
+ static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+     {
+         .callback = intel_use_opregion_panel_type_callback,
+         .ident = "Conrac GmbH IX45GM2",
+         .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+                 DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+         },
+     },
+     { }
+ };
+
int
intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{

@@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
        return -ENODEV;
    }

+     /*
+      * So far we know that some machined must use it, others must not use it.
+      * There doesn't seem to be any way to determine which way to go, except
+      * via a quirk list :(
+      */
+     if (!dmi_check_system(intel_use_opregion_panel_type)) {
+         DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+         return -ENODEV;
+     }
+
    /*
     * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
     * low vswing for eDP, whereas the VBT panel type (2) gives us normal

@@ -7859,6 +7859,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
    case GEN6_PCODE_ILLEGAL_CMD:
        return -ENXIO;
    case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+     case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
        return -EOVERFLOW;
    case GEN6_PCODE_TIMEOUT:
        return -ETIMEDOUT;

@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
    struct drm_i915_private *dev_priv = to_i915(dev);

    uint32_t max_sleep_time = 0x1f;
-     /* Lately it was identified that depending on panel idle frame count
-      * calculated at HW can be off by 1. So let's use what came
-      * from VBT + 1.
-      * There are also other cases where panel demands at least 4
-      * but VBT is not being set. To cover these 2 cases lets use
-      * at least 5 when VBT isn't set to be on the safest side.
+     /*
+      * Let's respect VBT in case VBT asks a higher idle_frame value.
+      * Let's use 6 as the minimum to cover all known cases including
+      * the off-by-one issue that HW has in some cases. Also there are
+      * cases where sink should be able to train
+      * with the 5 or 6 idle patterns.
     */
-     uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+     uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
    uint32_t val = EDP_PSR_ENABLE;

    val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;

@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
    return &vc4->bo_cache.size_list[page_index];
}

- void vc4_bo_cache_purge(struct drm_device *dev)
+ static void vc4_bo_cache_purge(struct drm_device *dev)
{
    struct vc4_dev *vc4 = to_vc4_dev(dev);

@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
     * of uniforms on each side. However, this scheme is easy to
     * validate so it's all we allow for now.
     */
-     if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+     switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+     case QPU_SIG_NONE:
+     case QPU_SIG_SCOREBOARD_UNLOCK:
+     case QPU_SIG_COLOR_LOAD:
+     case QPU_SIG_LOAD_TMU0:
+     case QPU_SIG_LOAD_TMU1:
+         break;
+     default:
        DRM_ERROR("uniforms address change must be "
              "normal math\n");
        return false;

@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)

    spin_lock_irqsave(&ep->com.dev->lock, flags);
    _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+     if (idr_is_empty(&ep->com.dev->hwtid_idr))
+         wake_up(&ep->com.dev->wait);
    spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

@@ -2117,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
    }
    ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                n, pdev, rt_tos2priority(tos));
-     if (!ep->l2t)
+     if (!ep->l2t) {
+         dev_put(pdev);
        goto out;
+     }
    ep->mtu = pdev->mtu;
    ep->tx_chan = cxgb4_port_chan(pdev);
    ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,

@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
static void c4iw_dealloc(struct uld_ctx *ctx)
{
    c4iw_rdev_close(&ctx->dev->rdev);
+     WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
    idr_destroy(&ctx->dev->cqidr);
+     WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
    idr_destroy(&ctx->dev->qpidr);
+     WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
    idr_destroy(&ctx->dev->mmidr);
+     wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
    idr_destroy(&ctx->dev->hwtid_idr);
    idr_destroy(&ctx->dev->stid_idr);
    idr_destroy(&ctx->dev->atid_idr);

@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
    mutex_init(&devp->rdev.stats.lock);
    mutex_init(&devp->db_mutex);
    INIT_LIST_HEAD(&devp->db_fc_list);
+     init_waitqueue_head(&devp->wait);
    devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

    if (c4iw_debugfs_root) {

@@ -263,6 +263,7 @@ struct c4iw_dev {
    struct idr stid_idr;
    struct list_head db_fc_list;
    u32 avail_ird;
+     wait_queue_head_t wait;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)

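Note: the three cxgb4 hunks above cooperate: each release wakes dev->wait once the hwtid table drains, and teardown sleeps until that happens before destroying the idr. The consolidated idiom, as a sketch (simplified from the driver code above; locking and naming condensed):

    /* release side: wake the waiter when the last entry goes away */
    spin_lock_irqsave(&dev->lock, flags);
    _remove_handle(dev, &dev->hwtid_idr, hwtid, 0);
    if (idr_is_empty(&dev->hwtid_idr))
        wake_up(&dev->wait);
    spin_unlock_irqrestore(&dev->lock, flags);

    /* teardown side: sleep until the table is empty, then destroy it */
    wait_event(dev->wait, idr_is_empty(&dev->hwtid_idr));
    idr_destroy(&dev->hwtid_idr);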
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)

        /* Generate GUID changed event */
        if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+             if (mlx4_is_master(dev->dev)) {
+                 union ib_gid gid;
+                 int err = 0;
+
+                 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+                     err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+                 else
+                     gid.global.subnet_prefix =
+                         eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+                 if (err) {
+                     pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+                         port, err);
+                 } else {
+                     pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+                          port,
+                          (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+                          be64_to_cpu(gid.global.subnet_prefix));
+                     atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+                              be64_to_cpu(gid.global.subnet_prefix));
+                 }
+             }
            mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
            /*if master, notify all slaves*/
            if (mlx4_is_master(dev->dev))

@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
        if (err)
            goto demux_err;
        dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+         atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+                  be64_to_cpu(gid.global.subnet_prefix));
        err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                      &dev->sriov.sqps[i]);
        if (err)

@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
    bool per_port = !!(ibdev->dev->caps.flags2 &
               MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);

+     if (mlx4_is_slave(ibdev->dev))
+         return 0;
+
    for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
        /* i == 1 means we are building port counters */
        if (i && !per_port)

@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
        if (!group->members[i])
            leave_state |= (1 << i);

-     return leave_state & (group->rec.scope_join_state & 7);
+     return leave_state & (group->rec.scope_join_state & 0xf);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)

@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
        } else
            mcg_warn_group(group, "DRIVER BUG\n");
    } else if (group->state == MCAST_LEAVE_SENT) {
-         if (group->rec.scope_join_state & 7)
-             group->rec.scope_join_state &= 0xf8;
+         if (group->rec.scope_join_state & 0xf)
+             group->rec.scope_join_state &= 0xf0;
        group->state = MCAST_IDLE;
        mutex_unlock(&group->lock);
        if (release_group(group, 1))

@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
               struct mcast_req *req)
{
-     u8 group_join_state = group->rec.scope_join_state & 7;
+     u8 group_join_state = group->rec.scope_join_state & 0xf;
    int ref = 0;
    u16 status;
    struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
        u8 cur_join_state;

        resp_join_state = ((struct ib_sa_mcmember_data *)
-                 group->response_sa_mad.data)->scope_join_state & 7;
-         cur_join_state = group->rec.scope_join_state & 7;
+                 group->response_sa_mad.data)->scope_join_state & 0xf;
+         cur_join_state = group->rec.scope_join_state & 0xf;

        if (method == IB_MGMT_METHOD_GET_RESP) {
            /* successfull join */

@@ -710,7 +710,7 @@ process_requests:
        req = list_first_entry(&group->pending_list, struct mcast_req,
                       group_list);
        sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-         req_join_state = sa_data->scope_join_state & 0x7;
+         req_join_state = sa_data->scope_join_state & 0xf;

        /* For a leave request, we will immediately answer the VF, and
         * update our internal counters. The actual leave will be sent

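Note: scope_join_state packs the MCMemberRecord scope in the high nibble and the join state in the low nibble. The old 0x7 mask silently dropped bit 0x8 of the join state (the send-only full-member flag), which is why every mask in this file widens to 0xf and the clear mask flips from 0xf8 to 0xf0. A one-line sketch of the layout (the macro names are illustrative only, not from the driver):

    #define MCAST_SCOPE(v)       ((v) >> 4)   /* high nibble: scope */
    #define MCAST_JOIN_STATE(v)  ((v) & 0xf)  /* low nibble: join state,
                                               * including the 0x8
                                               * send-only full-member bit */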
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
    struct workqueue_struct *wq;
    struct workqueue_struct *ud_wq;
    spinlock_t ud_lock;
-     __be64 subnet_prefix;
+     atomic64_t subnet_prefix;
    __be64 guid_cache[128];
    struct mlx4_ib_dev *dev;
    /* the following lock protects both mcg_table and mcg_mgid0_list */

@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
            sqp->ud_header.grh.flow_label =
                ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
            sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
-         if (is_eth)
+         if (is_eth) {
            memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-         else {
+         } else {
            if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
                /* When multi-function is enabled, the ib_core gid
                 * indexes don't necessarily match the hw ones, so
-                  * we must use our own cache */
-                 sqp->ud_header.grh.source_gid.global.subnet_prefix =
-                     to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                         subnet_prefix;
-                 sqp->ud_header.grh.source_gid.global.interface_id =
-                     to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                         guid_cache[ah->av.ib.gid_index];
-             } else
-                 ib_get_cached_gid(ib_dev,
-                           be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                           ah->av.ib.gid_index,
-                           &sqp->ud_header.grh.source_gid, NULL);
+                  * we must use our own cache
+                  */
+                 sqp->ud_header.grh.source_gid.global.subnet_prefix =
+                     cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+                                     demux[sqp->qp.port - 1].
+                                     subnet_prefix)));
+                 sqp->ud_header.grh.source_gid.global.interface_id =
+                     to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                         guid_cache[ah->av.ib.gid_index];
+             } else {
+                 ib_get_cached_gid(ib_dev,
+                           be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                           ah->av.ib.gid_index,
+                           &sqp->ud_header.grh.source_gid, NULL);
+             }
        }
        memcpy(sqp->ud_header.grh.destination_gid.raw,
               ah->av.ib.dgid, 16);

@@ -288,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
-     return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+     if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+         return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+     return 0;
}

enum {

@@ -1428,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
                         dmac_47_16),
                ib_spec->eth.val.dst_mac);

+         ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                          smac_47_16),
+                 ib_spec->eth.mask.src_mac);
+         ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                          smac_47_16),
+                 ib_spec->eth.val.src_mac);
+
        if (ib_spec->eth.mask.vlan_tag) {
            MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                 vlan_tag, 1);

@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
{
    rvt_deinit_mregion(&mr->mr);
    rvt_free_lkey(&mr->mr);
-     vfree(mr);
+     kfree(mr);
}

/**

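Note: the one-liner above pairs the free with the allocator actually used. Memory obtained from the kmalloc()/kzalloc() slab family must be returned with kfree(); vfree() is only valid for vmalloc()-family mappings, and handing it a slab pointer corrupts allocator state. A sketch of the pairing rule (the allocation expression shown is assumed for illustration, not quoted from the driver):

    mr = kzalloc(sizeof(*mr), GFP_KERNEL);  /* slab allocation (assumed) */
    if (!mr)
        return ERR_PTR(-ENOMEM);
    /* ... use mr ... */
    kfree(mr);  /* matches kzalloc(); vfree(mr) here would be a bug */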
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
        return err;
    }

-     err = rxe_net_init();
+     err = rxe_net_ipv4_init();
    if (err) {
-         pr_err("rxe: unable to init\n");
+         pr_err("rxe: unable to init ipv4 tunnel\n");
        rxe_cache_exit();
-         return err;
+         goto exit;
    }

+     err = rxe_net_ipv6_init();
+     if (err) {
+         pr_err("rxe: unable to init ipv6 tunnel\n");
+         rxe_cache_exit();
+         goto exit;
+     }
+
+     err = register_netdevice_notifier(&rxe_net_notifier);
+     if (err) {
+         pr_err("rxe: Failed to rigister netdev notifier\n");
+         goto exit;
+     }
+
    pr_info("rxe: loaded\n");

    return 0;

+ exit:
+     rxe_release_udp_tunnel(recv_sockets.sk4);
+     rxe_release_udp_tunnel(recv_sockets.sk6);
+     return err;
}

static void __exit rxe_module_exit(void)

@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
                qp->req.need_retry = 1;
                rxe_run_task(&qp->req.task, 1);
            }
+
+             if (pkt) {
+                 rxe_drop_ref(pkt->qp);
+                 kfree_skb(skb);
+             }
+
            goto exit;
+
        } else {
            wqe->status = IB_WC_RETRY_EXC_ERR;
            state = COMPST_ERROR;

@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
        case COMPST_ERROR:
            do_complete(qp, wqe);
            rxe_qp_error(qp);
+
+             if (pkt) {
+                 rxe_drop_ref(pkt->qp);
+                 kfree_skb(skb);
+             }
+
            goto exit;
        }
    }

@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
    return sock;
}

- static void rxe_release_udp_tunnel(struct socket *sk)
+ void rxe_release_udp_tunnel(struct socket *sk)
{
-     udp_tunnel_sock_release(sk);
+     if (sk)
+         udp_tunnel_sock_release(sk);
}

static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,

@@ -658,51 +659,45 @@ out:
    return NOTIFY_OK;
}

- static struct notifier_block rxe_net_notifier = {
+ struct notifier_block rxe_net_notifier = {
    .notifier_call = rxe_notify,
};

- int rxe_net_init(void)
+ int rxe_net_ipv4_init(void)
{
-     int err;
+     spin_lock_init(&dev_list_lock);

+     recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
+             htons(ROCE_V2_UDP_DPORT), false);
+     if (IS_ERR(recv_sockets.sk4)) {
+         recv_sockets.sk4 = NULL;
+         pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+         return -1;
+     }
+
+     return 0;
+ }
+
+ int rxe_net_ipv6_init(void)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+
    spin_lock_init(&dev_list_lock);

    recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
            htons(ROCE_V2_UDP_DPORT), true);
    if (IS_ERR(recv_sockets.sk6)) {
        recv_sockets.sk6 = NULL;
        pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
        return -1;
    }
-
-     recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
-             htons(ROCE_V2_UDP_DPORT), false);
-     if (IS_ERR(recv_sockets.sk4)) {
-         rxe_release_udp_tunnel(recv_sockets.sk6);
-         recv_sockets.sk4 = NULL;
-         recv_sockets.sk6 = NULL;
-         pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
-         return -1;
-     }
-
-     err = register_netdevice_notifier(&rxe_net_notifier);
-     if (err) {
-         rxe_release_udp_tunnel(recv_sockets.sk6);
-         rxe_release_udp_tunnel(recv_sockets.sk4);
-         pr_err("rxe: Failed to rigister netdev notifier\n");
-     }
-
-     return err;
+ #endif
+     return 0;
}

void rxe_net_exit(void)
{
-     if (recv_sockets.sk6)
-         rxe_release_udp_tunnel(recv_sockets.sk6);
-
-     if (recv_sockets.sk4)
-         rxe_release_udp_tunnel(recv_sockets.sk4);
+     rxe_release_udp_tunnel(recv_sockets.sk6);
+     rxe_release_udp_tunnel(recv_sockets.sk4);

    unregister_netdevice_notifier(&rxe_net_notifier);
}

@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
};

extern struct rxe_recv_sockets recv_sockets;
+ extern struct notifier_block rxe_net_notifier;
+ void rxe_release_udp_tunnel(struct socket *sk);

struct rxe_dev *rxe_net_add(struct net_device *ndev);

- int rxe_net_init(void);
+ int rxe_net_ipv4_init(void);
+ int rxe_net_ipv6_init(void);
void rxe_net_exit(void);

#endif /* RXE_NET_H */

@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
         * make a copy of the skb to post to the next qp
         */
        skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
-                 skb_clone(skb, GFP_KERNEL) : NULL;
+                 skb_clone(skb, GFP_ATOMIC) : NULL;

        pkt->qp = qp;
        rxe_add_ref(qp);

@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}

static void update_wqe_state(struct rxe_qp *qp,
        struct rxe_send_wqe *wqe,
-         struct rxe_pkt_info *pkt,
-         enum wqe_state *prev_state)
+         struct rxe_pkt_info *pkt)
{
-     enum wqe_state prev_state_ = wqe->state;
-
    if (pkt->mask & RXE_END_MASK) {
        if (qp_type(qp) == IB_QPT_RC)
            wqe->state = wqe_state_pending;
    } else {
        wqe->state = wqe_state_processing;
    }
-
-     *prev_state = prev_state_;
}

- static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-              struct rxe_pkt_info *pkt, int payload)
+ static void update_wqe_psn(struct rxe_qp *qp,
+         struct rxe_send_wqe *wqe,
+         struct rxe_pkt_info *pkt,
+         int payload)
{
    /* number of packets left to send including current one */
    int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

@@ -546,10 +543,35 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
        qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
    else
        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+ }
+
+ static void save_state(struct rxe_send_wqe *wqe,
+            struct rxe_qp *qp,
+            struct rxe_send_wqe *rollback_wqe,
+            struct rxe_qp *rollback_qp)
+ {
+     rollback_wqe->state = wqe->state;
+     rollback_wqe->first_psn = wqe->first_psn;
+     rollback_wqe->last_psn = wqe->last_psn;
+     rollback_qp->req.psn = qp->req.psn;
+ }
+
+ static void rollback_state(struct rxe_send_wqe *wqe,
+                struct rxe_qp *qp,
+                struct rxe_send_wqe *rollback_wqe,
+                struct rxe_qp *rollback_qp)
+ {
+     wqe->state = rollback_wqe->state;
+     wqe->first_psn = rollback_wqe->first_psn;
+     wqe->last_psn = rollback_wqe->last_psn;
+     qp->req.psn = rollback_qp->req.psn;
+ }
+
+ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+              struct rxe_pkt_info *pkt, int payload)
+ {
    qp->req.opcode = pkt->opcode;

    if (pkt->mask & RXE_END_MASK)
        qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
    int mtu;
    int opcode;
    int ret;
-     enum wqe_state prev_state;
+     struct rxe_qp rollback_qp;
+     struct rxe_send_wqe rollback_wqe;

next_wqe:
    if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))

@@ -688,13 +711,21 @@ next_wqe:
        goto err;
    }

-     update_wqe_state(qp, wqe, &pkt, &prev_state);
+     /*
+      * To prevent a race on wqe access between requester and completer,
+      * wqe members state and psn need to be set before calling
+      * rxe_xmit_packet().
+      * Otherwise, completer might initiate an unjustified retry flow.
+      */
+     save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+     update_wqe_state(qp, wqe, &pkt);
+     update_wqe_psn(qp, wqe, &pkt, payload);
    ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
    if (ret) {
        qp->need_req_skb = 1;
        kfree_skb(skb);

-         wqe->state = prev_state;
+         rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);

        if (ret == -EAGAIN) {
            rxe_run_task(&qp->req.task, 1);

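Note: read as one sequence, the requester-side fix above is a publish-then-rollback protocol: the wqe's state and PSN are made consistent before rxe_xmit_packet() can trigger a completion on another CPU, and they are restored from the snapshot if the send fails. In outline (condensed from the hunks above, not additional code):

    save_state(wqe, qp, &rollback_wqe, &rollback_qp);   /* snapshot       */
    update_wqe_state(qp, wqe, &pkt);                    /* publish state  */
    update_wqe_psn(qp, wqe, &pkt, payload);             /* publish psn    */

    ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
    if (ret)                                            /* undo on failure */
        rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);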
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
    free_rd_atomic_resource(qp, res);
    rxe_advance_resp_resource(qp);

+     memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
    res->type = RXE_ATOMIC_MASK;
    res->atomic.skb = skb;
-     res->first_psn = qp->resp.psn;
-     res->last_psn = qp->resp.psn;
-     res->cur_psn = qp->resp.psn;
+     res->first_psn = ack_pkt.psn;
+     res->last_psn = ack_pkt.psn;
+     res->cur_psn = ack_pkt.psn;

    rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
    if (rc) {

@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                rc = RESPST_CLEANUP;
                goto out;
            }
-             bth_set_psn(SKB_TO_PKT(skb_copy),
-                     qp->resp.psn - 1);
+
            /* Resend the result. */
            rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
                         pkt, skb_copy);

@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
    }

    if (level == IPOIB_FLUSH_LIGHT) {
+         int oper_up;
        ipoib_mark_paths_invalid(dev);
+         /* Set IPoIB operation as down to prevent races between:
+          * the flush flow which leaves MCG and on the fly joins
+          * which can happen during that time. mcast restart task
+          * should deal with join requests we missed.
+          */
+         oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        ipoib_mcast_dev_flush(dev);
+         if (oper_up)
+             set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        ipoib_flush_ah(dev);
    }

@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
    struct irq_domain_chip_generic *dgc = d->gc;
    struct irq_chip_generic *gc;
+     unsigned long flags;
    unsigned smr;
    int idx;
    int ret;

@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,

    gc = dgc->gc[idx];

-     irq_gc_lock(gc);
+     irq_gc_lock_irqsave(gc, flags);
    smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
    aic_common_set_priority(intspec[2], &smr);
    irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-     irq_gc_unlock(gc);
+     irq_gc_unlock_irqrestore(gc, flags);

    return ret;
}

@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                 unsigned int *out_type)
{
    struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+     unsigned long flags;
    unsigned smr;
    int ret;

@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
    if (ret)
        return ret;

-     irq_gc_lock(bgc);
+     irq_gc_lock_irqsave(bgc, flags);
    irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
    smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
    aic_common_set_priority(intspec[2], &smr);
    irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
-     irq_gc_unlock(bgc);
+     irq_gc_unlock_irqrestore(bgc, flags);

    return ret;
}

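Note: both xlate fixes swap irq_gc_lock()/irq_gc_unlock() for their irqsave variants. The plain helpers take the generic-chip lock without masking local interrupts, so an interrupt arriving in the middle of the read-modify-write of the SMR register could try to take the same lock on the same CPU and deadlock; saving and disabling local IRQs around the critical section closes that window. The resulting pattern, as a sketch (variable names simplified):

    unsigned long flags;

    irq_gc_lock_irqsave(gc, flags);      /* lock + local IRQs off */
    smr = irq_reg_readl(gc, AT91_AIC_SMR(hwirq));
    aic_common_set_priority(priority, &smr);
    irq_reg_writel(gc, smr, AT91_AIC_SMR(hwirq));
    irq_gc_unlock_irqrestore(gc, flags); /* unlock + restore IRQ state */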
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)

    /* Only reconfigure if we have a different burst size */
    if (*bp != burst) {
-         struct dma_slave_config cfg;
-
-         cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-         cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-         cfg.src_maxburst = burst;
-         cfg.dst_maxburst = burst;
+         struct dma_slave_config cfg = {
+             .src_addr = host->phys_base +
+                     OMAP_MMC_REG(host, DATA),
+             .dst_addr = host->phys_base +
+                     OMAP_MMC_REG(host, DATA),
+             .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+             .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+             .src_maxburst = burst,
+             .dst_maxburst = burst,
+         };

        if (dmaengine_slave_config(c, &cfg))
            goto use_pio;

@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
                     struct mmc_request *req)
{
-     struct dma_slave_config cfg;
    struct dma_async_tx_descriptor *tx;
    int ret = 0, i;
    struct mmc_data *data = req->data;
    struct dma_chan *chan;
+     struct dma_slave_config cfg = {
+         .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+         .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+         .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+         .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+         .src_maxburst = data->blksz / 4,
+         .dst_maxburst = data->blksz / 4,
+     };

    /* Sanity check: all the SG entries must be aligned by block size. */
    for (i = 0; i < data->sg_len; i++) {

@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,

    chan = omap_hsmmc_get_dma_chan(host, data);

-     cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-     cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-     cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-     cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-     cfg.src_maxburst = data->blksz / 4;
-     cfg.dst_maxburst = data->blksz / 4;
-
    ret = dmaengine_slave_config(chan, &cfg);
    if (ret)
        return ret;

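Note: in both MMC hunks the conversion to a designated initializer is more than style. A bare `struct dma_slave_config cfg;` on the stack leaves every field the driver never assigns (for example .device_fc or .slave_id) holding stack garbage that dmaengine_slave_config() may read; with a designated initializer, all unnamed members are guaranteed to be zero. A minimal sketch of the idiom:

    struct dma_slave_config cfg = {
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        /* every member not named here is implicitly zeroed */
    };

    ret = dmaengine_slave_config(chan, &cfg);
    if (ret)
        return ret;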
@@ -28,6 +28,7 @@

struct st_mmc_platform_data {
    struct reset_control *rstc;
+     struct clk *icnclk;
    void __iomem *top_ioaddr;
};

@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
    struct sdhci_host *host;
    struct st_mmc_platform_data *pdata;
    struct sdhci_pltfm_host *pltfm_host;
-     struct clk *clk;
+     struct clk *clk, *icnclk;
    int ret = 0;
    u16 host_version;
    struct resource *res;

@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
        return PTR_ERR(clk);
    }

+     /* ICN clock isn't compulsory, but use it if it's provided. */
+     icnclk = devm_clk_get(&pdev->dev, "icn");
+     if (IS_ERR(icnclk))
+         icnclk = NULL;
+
    rstc = devm_reset_control_get(&pdev->dev, NULL);
    if (IS_ERR(rstc))
        rstc = NULL;

@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
    }

    clk_prepare_enable(clk);
+     clk_prepare_enable(icnclk);

    /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
    res = platform_get_resource_byname(pdev, IORESOURCE_MEM,

@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
    }

    pltfm_host->clk = clk;
+     pdata->icnclk = icnclk;

    /* Configure the Arasan HC inside the flashSS */
    st_mmcss_cconfig(np, host);

@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
    return 0;

err_out:
+     clk_disable_unprepare(icnclk);
    clk_disable_unprepare(clk);
err_of:
    sdhci_pltfm_free(pdev);

@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)

    ret = sdhci_pltfm_unregister(pdev);

+     clk_disable_unprepare(pdata->icnclk);
+
    if (rstc)
        reset_control_assert(rstc);

@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
    if (pdata->rstc)
        reset_control_assert(pdata->rstc);

+     clk_disable_unprepare(pdata->icnclk);
    clk_disable_unprepare(pltfm_host->clk);
out:
    return ret;

@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
    struct device_node *np = dev->of_node;

    clk_prepare_enable(pltfm_host->clk);
+     clk_prepare_enable(pdata->icnclk);

    if (pdata->rstc)
        reset_control_deassert(pdata->rstc);

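Note: the ICN clock handling above relies on a common optional-resource idiom: a failed devm_clk_get() is collapsed to NULL, and the clk API treats a NULL clk as a no-op in clk_prepare_enable()/clk_disable_unprepare(), so none of the later call sites need to re-check whether the clock exists. As a sketch:

    icnclk = devm_clk_get(&pdev->dev, "icn");
    if (IS_ERR(icnclk))
        icnclk = NULL;              /* clock absent: fall through */

    clk_prepare_enable(icnclk);     /* no-op when icnclk == NULL */
    /* ... */
    clk_disable_unprepare(icnclk);  /* likewise safe */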
@@ -1693,7 +1693,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
            nvme_suspend_queue(dev->queues[i]);

    if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-         nvme_suspend_queue(dev->queues[0]);
+         /* A device might become IO incapable very soon during
+          * probe, before the admin queue is configured. Thus,
+          * queue_count can be 0 here.
+          */
+         if (dev->queue_count)
+             nvme_suspend_queue(dev->queues[0]);
    } else {
        nvme_disable_io_queues(dev);
        nvme_disable_admin_queue(dev, shutdown);

@@ -2112,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
        .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
    { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
        .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+     { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
+         .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
    { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
    { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
    { 0, }

@@ -82,6 +82,8 @@ struct nvme_rdma_request {
 
 enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_CONNECTED = (1 << 0),
+	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+	NVME_RDMA_Q_DELETING = (1 << 2),
 };
 
 struct nvme_rdma_queue {
@@ -291,6 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	if (IS_ERR(req->mr)) {
 		ret = PTR_ERR(req->mr);
 		req->mr = NULL;
+		goto out;
 	}
 
 	req->mr->need_inval = false;
@@ -480,9 +483,14 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-	struct nvme_rdma_device *dev = queue->device;
-	struct ib_device *ibdev = dev->dev;
+	struct nvme_rdma_device *dev;
+	struct ib_device *ibdev;
 
+	if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+		return;
+
+	dev = queue->device;
+	ibdev = dev->dev;
 	rdma_destroy_qp(queue->cm_id);
 	ib_free_cq(queue->ib_cq);
 
@@ -533,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
 		ret = -ENOMEM;
 		goto out_destroy_qp;
 	}
+	set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
 	return 0;
 
@@ -552,6 +561,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
 	queue = &ctrl->queues[idx];
 	queue->ctrl = ctrl;
+	queue->flags = 0;
 	init_completion(&queue->cm_done);
 
 	if (idx > 0)
@@ -590,6 +600,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 	return 0;
 
 out_destroy_cm_id:
+	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
 	return ret;
 }
@@ -608,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
 {
-	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
 		return;
 	nvme_rdma_stop_queue(queue);
 	nvme_rdma_free_queue(queue);
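The two new flags rely on the atomic bitops idiom for one-shot ownership: whoever wins the test_and_set_bit() on the DELETING flag owns the teardown, and whoever wins the test_and_clear_bit() on the ALLOCATED flag owns releasing the IB resources, so neither path can run twice. A standalone sketch of the idiom on a generic object (hypothetical names, not the driver's types):

    #include <linux/bitops.h>

    enum { OBJ_ALLOCATED, OBJ_DELETING };

    struct obj {
            unsigned long flags;
    };

    static void obj_delete(struct obj *o)
    {
            /* Whoever flips DELETING from 0 to 1 owns the teardown;
             * every later caller sees 1 and backs off. */
            if (test_and_set_bit(OBJ_DELETING, &o->flags))
                    return;
            /* ... stop and free the object exactly once ... */
    }

    static void obj_release_resource(struct obj *o)
    {
            /* Release only if we are the caller that clears ALLOCATED. */
            if (!test_and_clear_bit(OBJ_ALLOCATED, &o->flags))
                    return;
            /* ... free the resource exactly once ... */
    }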
@@ -652,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	return 0;
 
 out_free_queues:
-	for (; i >= 1; i--)
+	for (i--; i >= 1; i--)
 		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 
 	return ret;
@@ -761,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(work,
 			struct nvme_rdma_ctrl, err_work);
+	int i;
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
+	for (i = 0; i < ctrl->queue_count; i++)
+		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
 	if (ctrl->queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1305,58 +1321,6 @@ out_destroy_queue_ib:
 	return ret;
 }
 
-/**
- * nvme_rdma_device_unplug() - Handle RDMA device unplug
- * @queue:      Queue that owns the cm_id that caught the event
- *
- * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
- *
- * In our case, the RDMA resources are managed per controller and not
- * only per queue. So the way we handle this is we trigger an implicit
- * controller deletion upon the first DEVICE_REMOVAL event we see, and
- * hold the event inflight until the controller deletion is completed.
- *
- * One exception that we need to handle is the destruction of the cm_id
- * that caught the event. Since we hold the callout until the controller
- * deletion is completed, we'll deadlock if the controller deletion will
- * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources,
- * then queue the controller deletion which won't destroy this queue and
- * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
- */
-static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
-{
-	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret = 0;
-
-	/* Own the controller deletion */
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
-		return 0;
-
-	dev_warn(ctrl->ctrl.device,
-		"Got rdma device removal event, deleting ctrl\n");
-
-	/* Get rid of reconnect work if its running */
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
-	/* Disable the queue so ctrl delete won't free it */
-	if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
-		/* Free this queue ourselves */
-		nvme_rdma_stop_queue(queue);
-		nvme_rdma_destroy_queue_ib(queue);
-
-		/* Return non-zero so the cm_id will destroy implicitly */
-		ret = 1;
-	}
-
-	/* Queue controller deletion */
-	queue_work(nvme_rdma_wq, &ctrl->delete_work);
-	flush_work(&ctrl->delete_work);
-	return ret;
-}
-
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		struct rdma_cm_event *ev)
 {
@@ -1398,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		nvme_rdma_error_recovery(queue->ctrl);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		/* return 1 means impliciy CM ID destroy */
-		return nvme_rdma_device_unplug(queue);
+		/* device removal is handled via the ib_client API */
+		break;
 	default:
 		dev_err(queue->ctrl->ctrl.device,
 			"Unexpected RDMA CM event (%d)\n", ev->event);
|
||||||
static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
|
static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
|
||||||
{
|
{
|
||||||
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
|
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
|
||||||
int ret;
|
int ret = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Keep a reference until all work is flushed since
|
||||||
|
* __nvme_rdma_del_ctrl can free the ctrl mem
|
||||||
|
*/
|
||||||
|
if (!kref_get_unless_zero(&ctrl->ctrl.kref))
|
||||||
|
return -EBUSY;
|
||||||
ret = __nvme_rdma_del_ctrl(ctrl);
|
ret = __nvme_rdma_del_ctrl(ctrl);
|
||||||
if (ret)
|
if (!ret)
|
||||||
return ret;
|
flush_work(&ctrl->delete_work);
|
||||||
|
nvme_put_ctrl(&ctrl->ctrl);
|
||||||
flush_work(&ctrl->delete_work);
|
return ret;
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
|
static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
|
||||||
|
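The kref_get_unless_zero() call above is the standard guard against use-after-free when a function must flush asynchronous work that may drop the last reference to the object it is operating on. A minimal sketch of the idiom on a generic refcounted object (illustrative names only):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct ctrl {
            struct kref kref;
            struct work_struct delete_work;
    };

    static void ctrl_free(struct kref *kref)
    {
            kfree(container_of(kref, struct ctrl, kref));
    }

    static int ctrl_delete(struct ctrl *c)
    {
            /* Fail if the object is already on its way out ... */
            if (!kref_get_unless_zero(&c->kref))
                    return -EBUSY;
            /* ... otherwise our temporary reference keeps *c valid
             * while the queued deletion runs to completion. */
            flush_work(&c->delete_work);
            kref_put(&c->kref, ctrl_free);
            return 0;
    }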
@@ -2005,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
 
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+	struct nvme_rdma_ctrl *ctrl;
+
+	/* Delete all controllers using this device */
+	mutex_lock(&nvme_rdma_ctrl_mutex);
+	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+		if (ctrl->device->dev != ib_device)
+			continue;
+		dev_info(ctrl->ctrl.device,
+			"Removing ctrl: NQN \"%s\", addr %pISp\n",
+			ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+		__nvme_rdma_del_ctrl(ctrl);
+	}
+	mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+	flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+	.name   = "nvme_rdma",
+	.add = nvme_rdma_add_one,
+	.remove = nvme_rdma_remove_one
+};
+
 static int __init nvme_rdma_init_module(void)
 {
+	int ret;
+
 	nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
 	if (!nvme_rdma_wq)
 		return -ENOMEM;
 
+	ret = ib_register_client(&nvme_rdma_ib_client);
+	if (ret) {
+		destroy_workqueue(nvme_rdma_wq);
+		return ret;
+	}
+
 	nvmf_register_transport(&nvme_rdma_transport);
 	return 0;
 }
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
-	struct nvme_rdma_ctrl *ctrl;
-
 	nvmf_unregister_transport(&nvme_rdma_transport);
-	mutex_lock(&nvme_rdma_ctrl_mutex);
-	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
-		__nvme_rdma_del_ctrl(ctrl);
-	mutex_unlock(&nvme_rdma_ctrl_mutex);
+	ib_unregister_client(&nvme_rdma_ib_client);
 	destroy_workqueue(nvme_rdma_wq);
 }
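This is the crux of the rework: instead of reacting to RDMA_CM_EVENT_DEVICE_REMOVAL per cm_id (the removed nvme_rdma_device_unplug path and its deadlock bookkeeping), the driver registers an ib_client and lets the IB core call back once per removed device. A sketch of the registration skeleton, reduced to its moving parts (client name and stub callbacks are placeholders):

    #include <rdma/ib_verbs.h>

    static void my_add_one(struct ib_device *device)
    {
            /* nothing to set up eagerly; resources are created on demand */
    }

    static void my_remove_one(struct ib_device *device, void *client_data)
    {
            /* tear down every user of 'device' before returning; the IB
             * core makes no further upcalls for this device afterwards */
    }

    static struct ib_client my_client = {
            .name   = "my_client",
            .add    = my_add_one,
            .remove = my_remove_one,
    };

    /* Pairing: ib_register_client(&my_client) in module init,
     * ib_unregister_client(&my_client) in module exit. */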
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 /************************ runtime PM support ***************************/
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
 static int pcmcia_dev_resume(struct device *dev);
 
 static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
 	int rc;
 
 	device_lock(dev);
-	rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+	rc = pcmcia_dev_suspend(dev);
 	device_unlock(dev);
 	return rc;
 }
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
 
 /* PM support, also needed for reset */
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
 {
 	struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
 	struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
 	.remove_dev = &pcmcia_bus_remove_socket,
 };
 
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
+
 struct bus_type pcmcia_bus_type = {
 	.name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
 	.dev_groups = pcmcia_dev_groups,
 	.probe = pcmcia_device_probe,
 	.remove = pcmcia_device_remove,
-	.suspend = pcmcia_dev_suspend,
-	.resume = pcmcia_dev_resume,
+	.pm = &pcmcia_bus_pm_ops,
 };
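These hunks convert the pcmcia bus from the legacy struct bus_type .suspend/.resume callbacks (which carried a pm_message_t) to the dev_pm_ops interface. A condensed sketch of the same conversion for a generic bus (bus name and callback bodies are placeholders):

    #include <linux/device.h>
    #include <linux/pm.h>

    static int my_bus_dev_suspend(struct device *dev)
    {
            return 0;       /* quiesce the device */
    }

    static int my_bus_dev_resume(struct device *dev)
    {
            return 0;       /* re-activate the device */
    }

    /* Expands to the suspend/resume (and freeze/thaw/poweroff/restore)
     * entries used for system-wide sleep transitions. */
    static const struct dev_pm_ops my_bus_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(my_bus_dev_suspend, my_bus_dev_resume)
    };

    struct bus_type my_bus_type = {
            .name   = "mybus",
            .pm     = &my_bus_pm_ops,
    };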
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
 {
-	struct pcmcia_low_level *ops = dev->platform_data;
 	/*
 	 * We have at least one socket, so set MECR:CIT
 	 * (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
 		goto err1;
 	}
 
-	pxa2xx_configure_sockets(&dev->dev);
+	pxa2xx_configure_sockets(&dev->dev, ops);
 	dev_set_drvdata(&dev->dev, sinfo);
 
 	return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
 {
-	pxa2xx_configure_sockets(dev);
+	struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+	pxa2xx_configure_sockets(dev, ops);
 	return 0;
 }
@@ -1,4 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
 
 int pcmcia_badge4_init(struct sa1111_dev *dev)
 {
-	int ret = -ENODEV;
-
-	if (machine_is_badge4()) {
-		printk(KERN_INFO
-		       "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
-		       __func__,
-		       badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
-		sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
-		ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
-				sa11xx_drv_pcmcia_add_one);
-	}
-
-	return ret;
+	printk(KERN_INFO
+	       "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+	       __func__,
+	       badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+	sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+	return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+				 sa11xx_drv_pcmcia_add_one);
 }
 
 static int __init pcmv_setup(char *s)
@@ -18,6 +18,7 @@
 
 #include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
 #include <asm/irq.h>
 
 #include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
 	sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
 	sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
 
+	ret = -ENODEV;
 #ifdef CONFIG_SA1100_BADGE4
-	pcmcia_badge4_init(dev);
+	if (machine_is_badge4())
+		ret = pcmcia_badge4_init(dev);
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
-	pcmcia_jornada720_init(dev);
+	if (machine_is_jornada720())
+		ret = pcmcia_jornada720_init(dev);
 #endif
 #ifdef CONFIG_ARCH_LUBBOCK
-	pcmcia_lubbock_init(dev);
+	if (machine_is_lubbock())
+		ret = pcmcia_lubbock_init(dev);
 #endif
 #ifdef CONFIG_ASSABET_NEPONSET
-	pcmcia_neponset_init(dev);
+	if (machine_is_assabet())
+		ret = pcmcia_neponset_init(dev);
 #endif
-	return 0;
+
+	if (ret) {
+		release_mem_region(dev->res.start, 512);
+		sa1111_disable_device(dev);
+	}
+
+	return ret;
 }
 
 static int pcmcia_remove(struct sa1111_dev *dev)
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 
 int pcmcia_jornada720_init(struct sa1111_dev *sadev)
 {
-	int ret = -ENODEV;
+	unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
 
-	if (machine_is_jornada720()) {
-		unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+	/* Fixme: why messing around with SA11x0's GPIO1? */
+	GRER |= 0x00000002;
 
-		GRER |= 0x00000002;
+	/* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+	sa1111_set_io_dir(sadev, pin, 0, 0);
+	sa1111_set_io(sadev, pin, 0);
+	sa1111_set_sleep_io(sadev, pin, 0);
 
-		/* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
-		sa1111_set_io_dir(sadev, pin, 0, 0);
-		sa1111_set_io(sadev, pin, 0);
-		sa1111_set_sleep_io(sadev, pin, 0);
-
-		sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
-		ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
-				sa11xx_drv_pcmcia_add_one);
-	}
-
-	return ret;
+	sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+	return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+				 sa11xx_drv_pcmcia_add_one);
 }
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
 
 int pcmcia_lubbock_init(struct sa1111_dev *sadev)
 {
-	int ret = -ENODEV;
-
-	if (machine_is_lubbock()) {
-		/*
-		 * Set GPIO_A<3:0> to be outputs for the MAX1600,
-		 * and switch to standby mode.
-		 */
-		sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-		sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-		sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
-		/* Set CF Socket 1 power to standby mode. */
-		lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+	/*
+	 * Set GPIO_A<3:0> to be outputs for the MAX1600,
+	 * and switch to standby mode.
+	 */
+	sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+	sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+	sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
 
-		pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
-		pxa2xx_configure_sockets(&sadev->dev);
-		ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
-				pxa2xx_drv_pcmcia_add_one);
-	}
-
-	return ret;
+	/* Set CF Socket 1 power to standby mode. */
+	lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+
+	pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+	pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+	return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+				 pxa2xx_drv_pcmcia_add_one);
 }
 
 MODULE_LICENSE("GPL");
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
 
 int pcmcia_neponset_init(struct sa1111_dev *sadev)
 {
-	int ret = -ENODEV;
-
-	if (machine_is_assabet()) {
-		/*
-		 * Set GPIO_A<3:0> to be outputs for the MAX1600,
-		 * and switch to standby mode.
-		 */
-		sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-		sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-		sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-		sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
-		ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
-				sa11xx_drv_pcmcia_add_one);
-	}
-
-	return ret;
+	/*
+	 * Set GPIO_A<3:0> to be outputs for the MAX1600,
+	 * and switch to standby mode.
+	 */
+	sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+	sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+	sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+	sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+	return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+				 sa11xx_drv_pcmcia_add_one);
 }
@@ -144,19 +144,19 @@ static int
 sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
 {
 	struct soc_pcmcia_timing timing;
-	unsigned int clock = clk_get_rate(skt->clk);
+	unsigned int clock = clk_get_rate(skt->clk) / 1000;
 	unsigned long mecr = MECR;
 	char *p = buf;
 
 	soc_common_pcmcia_get_timing(skt, &timing);
 
-	p+=sprintf(p, "I/O      : %u (%u)\n", timing.io,
+	p+=sprintf(p, "I/O      : %uns (%uns)\n", timing.io,
 		   sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
 
-	p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+	p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
 		   sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
 
-	p+=sprintf(p, "common   : %u (%u)\n", timing.mem,
+	p+=sprintf(p, "common   : %uns (%uns)\n", timing.mem,
 		   sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
 
 	return p - buf;
Some files were not shown because too many files have changed in this diff.