Mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-25 08:11:45 +00:00)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 402ab99e409f..6716413c17ba 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -346,7 +346,7 @@ address perms offset dev inode pathname
a7cb1000-a7cb2000 ---p 00000000 00:00 0
a7cb2000-a7eb2000 rw-p 00000000 00:00 0
a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001]
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
@@ -378,7 +378,6 @@ is not associated with a file:

[heap] = the heap of the program
[stack] = the stack of the main process
- [stack:1001] = the stack of the thread with tid 1001
[vdso] = the "virtual dynamic shared object",
the kernel system call handler

@@ -386,10 +385,8 @@ is not associated with a file:

The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. This is a key difference from the
-content of /proc/PID/maps, where you will see all mappings that are being used
-as stack by all of those tasks. Hence, for the example above, the task-level
-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+as [stack] if that task sees it as a stack. Hence, for the example above, the
+task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:

08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
diff --git a/Makefile b/Makefile
index b74d60081a16..d1cc9e0b7473 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 20
+SUBLEVEL = 21
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 871f21783866..14cdc6dea493 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -391,6 +391,15 @@ config CAVIUM_ERRATUM_22375

If unsure, say Y.

+config CAVIUM_ERRATUM_23144
+ bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+ depends on NUMA
+ default y
+ help
+ ITS SYNC command hang for cross node io and collections/cpu mapping.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y
@@ -401,6 +410,17 @@ config CAVIUM_ERRATUM_23154

If unsure, say Y.

+config CAVIUM_ERRATUM_27456
+ bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
+ default y
+ help
+ On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
+ instructions may cause the icache to become corrupted if it
+ contains data for a non-current ASID. The fix is to
+ invalidate the icache when changing the mm context.
+
+ If unsure, say Y.
+
endmenu
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8f271b83f910..8136afc9df0d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,11 +30,12 @@
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_WORKAROUND_CAVIUM_27456 8
+#define ARM64_HAS_NO_HW_PREFETCH 9
+#define ARM64_HAS_UAO 10
+#define ARM64_ALT_PAN_NOT_UAO 11

-#define ARM64_NCAPS 11
+#define ARM64_NCAPS 12

#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5e6857b6bdc4..2d960f8588b0 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)

-#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
-
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_PS_MASK (7 << 16)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index feb6b4efa641..a3e846a28b05 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -100,6 +100,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+ {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ MIDR_RANGE(MIDR_THUNDERX, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 1),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 178ba2248a98..84c338f017b2 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
and x4, x4, x5
- ldr x5, =TCR_EL2_FLAGS
+ mov x5, #TCR_EL2_RES1
orr x4, x4, x5

#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,18 @@ __do_hyp_init:
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
- msr tcr_el2, x4
-
- ldr x4, =VTCR_EL2_FLAGS
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
- * VTCR_EL2.
+ * TCR_EL2 and VTCR_EL2.
*/
mrs x5, ID_AA64MMFR0_EL1
bfi x4, x5, #16, #3
+
+ msr tcr_el2, x4
+
+ ldr x4, =VTCR_EL2_FLAGS
+ bfi x4, x5, #16, #3
+
msr vtcr_el2, x4

mrs x4, mair_el1
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 1f6bb29ca53b..18201e9e8cc7 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -25,6 +25,8 @@
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>

#include "proc-macros.S"

@@ -137,7 +139,17 @@ ENTRY(cpu_do_switch_mm)
bfi x0, x1, #48, #16 // set the ASID
msr ttbr0_el1, x0 // set TTBR0
isb
+alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
ret
+ nop
+ nop
+ nop
+alternative_else
+ ic iallu
+ dsb nsh
+ isb
+ ret
+alternative_endif
ENDPROC(cpu_do_switch_mm)

.section ".text.init", #alloc, #execinstr
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index a62581815624..88fa25fae8bd 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -61,7 +61,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&da" (result) \
- : "da" (&v->counter), "bd" (i) \
+ : "da" (&v->counter), "br" (i) \
: "cc"); \
\
smp_mb(); \
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9f8402b35115..27e588f6c72e 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -164,6 +164,7 @@ struct coprocessor_request_block {
#define ICSWX_INITIATED (0x8)
#define ICSWX_BUSY (0x4)
#define ICSWX_REJECTED (0x2)
+#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */

static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
{
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index bf8f34a58670..b7019b559ddb 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)

- /* We need to setup MSR for VSX register save instructions. Here we
- * also clear the MSR RI since when we do the treclaim, we won't have a
- * valid kernel pointer for a while. We clear RI here as it avoids
- * adding another mtmsr closer to the treclaim. This makes the region
- * maked as non-recoverable wider than it needs to be but it saves on
- * inserting another mtmsrd later.
- */
+ /* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
- li r16, MSR_RI
+ li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

- /* The moment we treclaim, ALL of our GPRs will switch
+ /* Clear MSR RI since we are about to change r1, EE is already off. */
+ li r4, 0
+ mtmsrd r4, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ *
+ * The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
@@ -197,6 +201,11 @@ dont_backup_fp:

/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+ /* Reset MSR RI so we can take SLB faults again */
+ li r11, MSR_RI
+ mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM

@@ -397,11 +406,6 @@ restore_gprs:
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)

- /* Clear the MSR RI since we are about to change R1. EE is already off
- */
- li r4, 0
- mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
ld r6, _CCR(r7)
mtcr r6

- REST_GPR(1, r7) /* GPR1 */
- REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
- ld r7, GPR7(r7)
+
+ /*
+ * Store r1 and r5 on the stack so that we can access them
+ * after we clear MSR RI.
+ */
+
+ REST_GPR(5, r7)
+ std r5, -8(r1)
+ ld r5, GPR1(r7)
+ std r5, -16(r1)
+
+ REST_GPR(7, r7)
+
+ /* Clear MSR RI since we are about to change r1. EE is already off */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ */
+
+ ld r5, -8(r1)
+ ld r1, -16(r1)

/* Commit register state as checkpointed state: */
TRECHKPT
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b8045b97f4fb..d750cc0dfe30 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
static struct miscdevice prng_sha512_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
.fops = &prng_sha512_fops,
};
static struct miscdevice prng_tdes_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
.fops = &prng_tdes_fops,
};
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 1aac41e83ea1..92df3eb8d14e 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
#define ZPCI_IOTA_FS_2G 2
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)

+#define ZPCI_TABLE_SIZE_RT (1UL << 42)
+
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 19442395f413..f2f6720a3331 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -701,8 +701,7 @@ static int zpci_restore(struct device *dev)
goto out;

zpci_map_resources(pdev);
- zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);

out:
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index d348f2c09a1e..3a40f718baef 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -458,7 +458,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
goto out_clean;
}

- zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+ /*
+ * Restrict the iommu bitmap size to the minimum of the following:
+ * - main memory size
+ * - 3-level pagetable address limit minus start_dma offset
+ * - DMA address range allowed by the hardware (clp query pci fn)
+ *
+ * Also set zdev->end_dma to the actual end address of the usable
+ * range, instead of the theoretical maximum as reported by hardware.
+ */
+ zdev->iommu_size = min3((u64) high_memory,
+ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
+ zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
@@ -466,10 +478,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
goto out_reg;
}

- rc = zpci_register_ioat(zdev,
- 0,
- zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
goto out_reg;
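The clamping above reduces to taking the smallest of three limits, then deriving the real end of the DMA window from the result. A minimal user-space sketch of the same arithmetic (the stub values are assumptions for illustration, not real hardware values; the kernel's min3() is a macro, a local helper stands in here):

#include <stdio.h>
#include <stdint.h>

#define ZPCI_TABLE_SIZE_RT (1ULL << 42)	/* 3-level I/O translation table limit */

static uint64_t min3(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	uint64_t high_memory = 4ULL << 30;	/* pretend 4 GiB of main memory */
	uint64_t start_dma = 1ULL << 32;	/* pretend DMA window start */
	uint64_t end_dma = (1ULL << 48) - 1;	/* theoretical hw maximum */

	uint64_t iommu_size = min3(high_memory,
				   ZPCI_TABLE_SIZE_RT - start_dma,
				   end_dma - start_dma + 1);
	end_dma = start_dma + iommu_size - 1;	/* actual usable end address */

	printf("iommu_size=%#llx end_dma=%#llx\n",
	       (unsigned long long)iommu_size, (unsigned long long)end_dma);
	return 0;
}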
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2f69e3b184f6..a3e1f8497f8c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1587,6 +1587,9 @@ void __init enable_IR_x2apic(void)
unsigned long flags;
int ret, ir_stat;

+ if (skip_ioapic_setup)
+ return;
+
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 20e242ea1bc4..cfc4a966e2b9 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

+static unsigned char hv_get_nmi_reason(void)
+{
+ return 0;
+}
+
static void __init ms_hyperv_init_platform(void)
{
/*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
mark_tsc_unstable("running on Hyper-V");
+
+ /*
+ * Generation 2 instances don't support reading the NMI status from
+ * 0x61 port.
+ */
+ if (efi_enabled(EFI_BOOT))
+ x86_platform.get_nmi_reason = hv_get_nmi_reason;
}

const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index a316ca96f1b6..fc704ed587e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}

+static void cqm_cleanup(void)
+{
+ int i;
+
+ if (!cqm_rmid_ptrs)
+ return;
+
+ for (i = 0; i < cqm_max_rmid; i++)
+ kfree(cqm_rmid_ptrs[i]);
+
+ kfree(cqm_rmid_ptrs);
+ cqm_rmid_ptrs = NULL;
+}
+
static int intel_cqm_setup_rmid_cache(void)
{
struct cqm_rmid_entry *entry;
@@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(void)
int r = 0;

nr_rmids = cqm_max_rmid + 1;
- cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+ cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
nr_rmids, GFP_KERNEL);
if (!cqm_rmid_ptrs)
return -ENOMEM;
@@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(void)
mutex_unlock(&cache_mutex);

return 0;
-fail:
- while (r--)
- kfree(cqm_rmid_ptrs[r]);

- kfree(cqm_rmid_ptrs);
+fail:
+ cqm_cleanup();
return -ENOMEM;
}

@@ -281,9 +293,13 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)

/*
* Events that target same task are placed into the same cache group.
+ * Mark it as a multi event group, so that we update ->count
+ * for every event rather than just the group leader later.
*/
- if (a->hw.target == b->hw.target)
+ if (a->hw.target == b->hw.target) {
+ b->hw.is_group_event = true;
return true;
+ }

/*
* Are we an inherited event?
@@ -849,6 +865,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
bool conflict = false;
u32 rmid;

+ event->hw.is_group_event = false;
list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
rmid = iter->hw.cqm_rmid;

@@ -940,7 +957,9 @@ static u64 intel_cqm_event_count(struct perf_event *event)
return __perf_event_count(event);

/*
- * Only the group leader gets to report values. This stops us
+ * Only the group leader gets to report values except in case of
+ * multiple events in the same group, we still need to read the
+ * other events.This stops us
* reporting duplicate values to userspace, and gives us a clear
* rule for which task gets to report the values.
*
@@ -948,7 +967,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
* specific packages - we forfeit that ability when we create
* task events.
*/
- if (!cqm_group_leader(event))
+ if (!cqm_group_leader(event) && !event->hw.is_group_event)
return 0;

/*
@@ -1315,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm_match[] = {

static int __init intel_cqm_init(void)
{
- char *str, scale[20];
+ char *str = NULL, scale[20];
int i, cpu, ret;

if (!x86_match_cpu(intel_cqm_match))
@@ -1375,16 +1394,25 @@ static int __init intel_cqm_init(void)
cqm_pick_event_reader(i);
}

- __perf_cpu_notifier(intel_cqm_cpu_notifier);
-
ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
- if (ret)
+ if (ret) {
pr_err("Intel CQM perf registration failed: %d\n", ret);
- else
- pr_info("Intel CQM monitoring enabled\n");
+ goto out;
+ }
+
+ pr_info("Intel CQM monitoring enabled\n");

+ /*
+ * Register the hot cpu notifier once we are sure cqm
+ * is enabled to avoid notifier leak.
+ */
+ __perf_cpu_notifier(intel_cqm_cpu_notifier);
out:
cpu_notifier_register_done();
+ if (ret) {
+ kfree(str);
+ cqm_cleanup();
+ }

return ret;
}
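The fix above replaces an open-coded unwind with an idempotent cqm_cleanup() that any failure path can call. The pattern depends on the kmalloc()-to-kzalloc() switch: unfilled slots are NULL, and freeing NULL is a no-op. A stand-alone sketch of the same pattern (names mirror the patch, but the surrounding context is hypothetical):

#include <stdlib.h>

#define NR_ENTRIES 4

static void **entries;

static void cleanup(void)
{
	int i;

	if (!entries)
		return;			/* never allocated, or already cleaned */
	for (i = 0; i < NR_ENTRIES; i++)
		free(entries[i]);	/* free(NULL) is a no-op */
	free(entries);
	entries = NULL;			/* a second call is now harmless */
}

static int setup(void)
{
	int i;

	entries = calloc(NR_ENTRIES, sizeof(*entries));	/* zeroed, like kzalloc() */
	if (!entries)
		return -1;
	for (i = 0; i < NR_ENTRIES; i++) {
		entries[i] = malloc(32);
		if (!entries[i]) {
			cleanup();	/* safe: untouched slots are still NULL */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	if (setup())
		return 1;
	cleanup();
	return 0;
}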
diff --git a/block/blk-core.c b/block/blk-core.c
index f8e64cac981a..4fab5d610805 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);

if (q->mq_ops)
blk_mq_wake_waiters(q);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b966db8f3556..7225511cf0b4 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -92,9 +92,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ unsigned bvecs = 0;

bio_for_each_segment(bv, bio, iter) {
/*
+ * With arbitrary bio size, the incoming bio may be very
+ * big. We have to split the bio into small bios so that
+ * each holds at most BIO_MAX_PAGES bvecs because
+ * bio_clone() can fail to allocate big bvecs.
+ *
+ * It should have been better to apply the limit per
+ * request queue in which bio_clone() is involved,
+ * instead of globally. The biggest blocker is the
+ * bio_clone() in bio bounce.
+ *
+ * If bio is splitted by this reason, we should have
+ * allowed to continue bios merging, but don't do
+ * that now for making the change simple.
+ *
+ * TODO: deal with bio bounce's bio_clone() gracefully
+ * and convert the global limit into per-queue limit.
+ */
+ if (bvecs++ >= BIO_MAX_PAGES)
+ goto split;
+
+ /*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
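Stripped of block-layer context, the new guard above is just a counter that forces a split once a bio would carry more than BIO_MAX_PAGES bvecs (256 in kernels of this vintage). A toy sketch of the control flow:

#include <stdio.h>

#define BIO_MAX_PAGES 256

int main(void)
{
	unsigned bvecs = 0, segment;

	for (segment = 0; segment < 1000; segment++) {
		if (bvecs++ >= BIO_MAX_PAGES) {
			printf("split at segment %u\n", segment);
			break;	/* stands in for 'goto split' */
		}
		/* ... the other per-segment split checks would run here ... */
	}
	return 0;
}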
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6d6f8feb48c0..839b1e17481b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q)))
- blk_mq_complete_request(rq, -EIO);
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 79107597a594..c306b483de60 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}

- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
- * supported by this firmware loading method. This check has been
- * put in place to ensure correct forward compatibility options
- * when newer hardware variants come along.
+ /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+ * and 0x0c (WsP) are supported by this firmware loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
*/
- if (ver->hw_variant != 0x0b) {
+ if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver->hw_variant);
kfree_skb(skb);
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index aa30af5f0f2b..7845a38b6604 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -118,6 +118,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
+ int ret;

exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -145,7 +146,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);

- return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
}

#ifdef CONFIG_PM
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 27c0da29eca3..10224b01b97c 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- data &= ~((1 << pclk->param.reg_divider_width) - 1);
+ data &= ~((1 << pclk->param.reg_divider_width) - 1)
+ << pclk->param.reg_divider_shift;
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
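The one-line bug fixed above is a classic read-modify-write mistake: the clear mask was not shifted to the divider field's position, so stale bits could survive the subsequent OR. A generic sketch of the corrected pattern (field width, shift, and values are made up; in the driver the new divider value is prepared separately):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data = 0xdeadbeef;
	unsigned width = 4, shift = 8;
	uint32_t divider = 0x5 << 8;	/* new field value, already in position */

	data &= ~(((1u << width) - 1) << shift);	/* clear the field in place */
	data |= divider;				/* install the new value */

	printf("%#x\n", data);	/* prints 0xdeadb5ef */
	return 0;
}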
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4dbf1db16aca..9cc8abd3d116 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>

static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;

pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);

@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;

+ *setspeed = freq;
+
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}

+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
+ unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;

+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ return cpufreq_userspace_policy_init(policy);
+
+ if (!setspeed)
+ return -EINVAL;
+
switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ mutex_lock(&userspace_mutex);
+ policy->governor_data = NULL;
+ kfree(setspeed);
+ mutex_unlock(&userspace_mutex);
+ break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);

mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
+ *setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,

mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
+ *setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- cpu, policy->min, policy->max,
- policy->cur);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ cpu, policy->min, policy->max, policy->cur, *setspeed);

- if (policy->max < policy->cur)
+ if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
+ else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed,
+ CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
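The LIMITS branch above boils down to clamping the user's stored setspeed into the new [min, max] policy range and re-applying it, instead of losing the request. A minimal sketch of that decision (the function name is assumed for illustration):

static unsigned int pick_target(unsigned int setspeed,
				unsigned int min, unsigned int max)
{
	if (max < setspeed)
		return max;	/* ceiling dropped below the request */
	if (min > setspeed)
		return min;	/* floor rose above the request */
	return setspeed;	/* request still allowed: restore it */
}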
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 6dc597126b79..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -556,7 +556,10 @@ skip_enc:

/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (alg->caam.geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -565,6 +568,14 @@ skip_enc:
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);

+ if (alg->caam.geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,

init_aead_job(req, edesc, all_contig, encrypt);

- if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
@@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
return ret;
}

-static int aead_givdecrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int ivsize = crypto_aead_ivsize(aead);
-
- if (req->cryptlen < ivsize)
- return -EINVAL;
-
- req->cryptlen -= ivsize;
- req->assoclen += ivsize;
-
- return aead_decrypt(req);
-}
-
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 9ef51fafdbff..6e105e87b8ff 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
(unsigned int)ccw,
(unsigned int)be32_to_cpu(crb->ccw));

+ /*
+ * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
+ * XER[S0] is the integer summary overflow bit which is nothing
+ * to do NX. Since this bit can be set with other return values,
+ * mask this bit.
+ */
+ ret &= ~ICSWX_XERS0;
+
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);
@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
pr_err_ratelimited("ICSWX rejected\n");
ret = -EPROTO;
break;
- default:
- pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
- ret = -EPROTO;
- break;
}

if (!ret)
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index f3801b983f42..3f8bb9a40df1 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
.cra_init = p8_aes_cbc_init,
.cra_exit = p8_aes_cbc_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_cbc_setkey,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 404a1b69a3ab..72f138985e18 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
.cra_init = p8_aes_ctr_init,
.cra_exit = p8_aes_ctr_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_ctr_setkey,
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b9997335f193..b18e67d0e065 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};

+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 92b6acadfc52..21aacc1f45c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA

/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
-static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3;
}

-/* get the max pix clock supported by the link rate and lane num */
-static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** amdgpu specific DP functions *****/

-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
+static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (amdgpu_connector_is_dp12_capable(connector)) {
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ unsigned bpp =
+ amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}

- return drm_dp_max_link_rate(dpcd);
+ return -EINVAL;
}

static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
+ int ret;

if (!amdgpu_connector->con_priv)
return;
@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,

if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}

@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_lanes, dp_clock;
+ int ret;

if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv;

- dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock, &dp_lanes, &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;

if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector)))
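The rewritten helper walks (lane count, link rate) combinations from cheapest to most expensive and returns the first one whose bandwidth covers the requested pixel clock, failing instead of silently over-reporting. A stand-alone sketch of the search (the inputs in main() are hypothetical):

#include <stdio.h>

static int pick_dp_config(unsigned pix_clock, unsigned bpp,
			  unsigned max_lanes, unsigned max_rate,
			  unsigned *lanes_out, unsigned *rate_out)
{
	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
	unsigned lanes, i;

	for (lanes = 1; lanes <= max_lanes; lanes <<= 1) {
		for (i = 0; i < 3 && link_rates[i] <= max_rate; i++) {
			/* same bandwidth estimate as the patch uses */
			unsigned max_pix_clock = (lanes * link_rates[i] * 8) / bpp;
			if (max_pix_clock >= pix_clock) {
				*lanes_out = lanes;
				*rate_out = link_rates[i];
				return 0;
			}
		}
	}
	return -1;	/* mode needs more bandwidth than the link offers */
}

int main(void)
{
	unsigned lanes, rate;

	/* e.g. a 148500 kHz mode at 24 bpp on a 4-lane HBR2-capable sink */
	if (!pick_dp_config(148500, 24, 4, 540000, &lanes, &rate))
		printf("%u lane(s) @ %u\n", lanes, rate);
	return 0;
}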
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8035d4d6a4f5..653917a3bcc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}

const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index e5aec45bf985..1ac29d703c12 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int ret;

/*
* We can only steal an encoder coming from a connector, which means we
@@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);

- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
- if (ret)
- return ret;
connector_state->best_encoder = NULL;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dc84003f694e..5e4bb4837bae 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5231,6 +5231,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
unsigned long flags;
int ret = -EINVAL;

+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c7de454e8e88..b205224f1a44 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
- if (ret < 0) {
- drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
- }
+ if (ret < 0)
+ goto err_unref;
+
*handlep = ret;

ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_remove;

if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_revoke;
}

return 0;
+
+err_revoke:
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+err_remove:
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, *handlep);
+ spin_unlock(&file_priv->table_lock);
+err_unref:
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
}

/**
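The restructured tail above is the kernel's usual goto-unwind ladder: each failure jumps to the label that undoes exactly the steps completed so far, in reverse order, so each cleanup action exists in one place. A generic, self-contained sketch (the three resources are placeholders):

#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static int acquire_all(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto err_none;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto err_a;
	ctx->c = malloc(16);
	if (!ctx->c)
		goto err_b;
	return 0;

err_b:				/* unwind in reverse acquisition order */
	free(ctx->b);
err_a:
	free(ctx->a);
err_none:
	return -1;
}

int main(void)
{
	struct ctx ctx;

	if (acquire_all(&ctx))
		return 1;
	free(ctx.c);
	free(ctx.b);
	free(ctx.a);
	return 0;
}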
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d3ce4da6a6ad..d400d6773bbb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3313,6 +3313,9 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
}
extern void intel_i2c_reset(struct drm_device *dev);

+/* intel_bios.c */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9ed9f6dde86f..cace154bbdc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3240,19 +3240,20 @@ enum skl_disp_power_wells {

#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..d14bdc537587 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)

return 0;
}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3292495ee10f..a3254c3bcc7c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14160,6 +14160,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14168,27 +14170,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+ * and VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares as we've seen at least
+ * HDMI ports that the VBT claim are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);

- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);

if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}

intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e1d6d74c203..ebbd23407a80 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4592,20 +4592,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;

switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4657,8 +4657,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
@@ -6113,8 +6113,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return true;
}

-void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ int output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6124,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)

intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;

intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6179,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;

- return;
+ return true;

err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}

void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c5f11e0c5d5b..67f72a7ee7cb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1195,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);

/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4b8ed9f2dabc..dff69fef47e0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2030,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;

+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3fe21e7..1847f83b1e33 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return submit;
}

+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;

spin_lock(&file->table_lock);
+ pagefault_disable();

for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));

- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}

if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}

out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);

+out:
+ submit->nr_bos = i;
+
return ret;
}

diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bd73b4069069..44ee72e04df9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
return bpc * 3;
}

-/* get the max pix clock supported by the link rate and lane num */
-static int dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** radeon specific DP functions *****/

-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE])
-{
- int max_link_rate;
-
- if (radeon_connector_is_dp12_capable(connector))
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
- else
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
-
- return max_link_rate;
-}
-
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (radeon_connector_is_dp12_capable(connector)) {
- max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}

- return radeon_dp_get_max_link_rate(connector, dpcd);
+ return -EINVAL;
}

static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
+ int ret;

if (!radeon_connector->con_priv)
return;
@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,

if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}

@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_clock, dp_lanes;
+ int ret;

if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;

- dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dp_lanes,
+ &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;

if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..6dd39bdedb97 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -525,11 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
-
dig_connector = mst_enc->connector->con_priv;
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
- dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bba112628b47..7a0666ac4e23 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 *dpcd);
+extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 *dpcd,
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f342aad79cc6..35310336dd0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,

rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;

switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ec791e169f8f..936960202cf4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1251,6 +1251,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
+ value[n] - min < field->maxusage &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
@@ -1263,11 +1264,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
}

if (field->value[n] >= min && field->value[n] <= max
+ && field->value[n] - min < field->maxusage
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);

if (value[n] >= min && value[n] <= max
+ && value[n] - min < field->maxusage
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 9098f13f2f44..1ef37c727572 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
+#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

@@ -496,8 +497,21 @@ static void reset_channel_cb(void *arg)
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
+ struct tasklet_struct *tasklet;
int ret;

+ /*
+ * process_chn_event(), running in the tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+ * the former is accessing channel->inbound.ring_buffer, the latter
+ * could be freeing the ring_buffer pages.
+ *
+ * To resolve the race, we can serialize them by disabling the
+ * tasklet when the latter is running here.
+ */
+ tasklet = hv_context.event_dpc[channel->target_cpu];
+ tasklet_disable(tasklet);
+
channel->state = CHANNEL_OPEN_STATE;
channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
@@ -525,7 +539,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}

/* Tear down the gpadl for the channel's ring buffer */
@@ -538,7 +552,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}
}

@@ -549,12 +563,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
free_pages((unsigned long)channel->ringbuffer_pages,
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

- /*
- * If the channel has been rescinded; process device removal.
- */
- if (channel->rescind)
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+out:
+ tasklet_enable(tasklet);
+
return ret;
}

diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 652afd11a9ef..37238dffd947 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,6 +28,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
@@ -191,6 +192,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
if (channel == NULL)
return;

+ BUG_ON(!channel->rescind);
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -230,9 +233,7 @@ void vmbus_free_channels(void)

list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
- /* if we don't set rescind to true, vmbus_close_internal()
- * won't invoke hv_process_channel_removal().
- */
+ /* hv_process_channel_removal() needs this */
channel->rescind = true;

vmbus_device_unregister(channel->device_obj);
@@ -459,6 +460,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
cpumask_of_node(primary->numa_node));

cur_cpu = -1;
+
+ /*
+ * Normally Hyper-V host doesn't create more subchannels than there
+ * are VCPUs on the node but it is possible when not all present VCPUs
+ * on the node are initialized by guest. Clear the alloced_cpus_in_node
+ * to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
if (cur_cpu >= nr_cpu_ids) {
@@ -488,6 +500,40 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->target_vp = hv_context.vp_index[cur_cpu];
}

+static void vmbus_wait_for_unload(void)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ bool unloaded = false;
+
+ while (1) {
+ if (msg->header.message_type == HVMSG_NONE) {
+ mdelay(10);
+ continue;
+ }
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ unloaded = true;
+
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * header.message_type needs to be written before we do
+ * wrmsrl() below.
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending)
+ wrmsrl(HV_X64_MSR_EOM, 0);
+
+ if (unloaded)
+ break;
+ }
+}
+
/*
* vmbus_unload_response - Handler for the unload response.
*/
@@ -513,7 +559,14 @@ void vmbus_initiate_unload(void)
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

- wait_for_completion(&vmbus_connection.unload_event);
+ /*
+ * vmbus_initiate_unload() is also called on crash and the crash can be
+ * happening in an interrupt context, where scheduling is impossible.
+ */
+ if (!in_interrupt())
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
}

/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6341be8739ae..63194a9a7189 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -293,8 +293,14 @@ void hv_cleanup(void)
* Cleanup the TSC page based CS.
*/
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
+ /*
+ * Crash can happen in an interrupt context and unregistering
+ * a clocksource is impossible and redundant in this case.
+ */
+ if (!oops_in_progress) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+ }

hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index db4b887b889d..c37a71e13de0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -51,7 +51,6 @@ static struct {
struct hv_fcopy_hdr *fcopy_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *fcopy_context; /* for the channel callback */
} fcopy_transaction;

static void fcopy_respond_to_host(int error);
@@ -67,6 +66,13 @@ static struct hvutil_transport *hvt;
*/
static int dm_reg_value;

+static void fcopy_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_fcopy_onchannelcallback(channel);
+}
+
static void fcopy_timeout_func(struct work_struct *dummy)
{
/*
@@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct work_struct *dummy)
* process the pending transaction.
*/
fcopy_respond_to_host(HV_E_FAIL);
-
- /* Transaction is finished, reset the state. */
- if (fcopy_transaction.state > HVUTIL_READY)
- fcopy_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}

static int fcopy_handle_handshake(u32 version)
@@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 version)
return -EINVAL;
}
pr_debug("FCP: userspace daemon ver. %d registered\n", version);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
return 0;
}

@@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *context)
int util_fw_version;
int fcopy_srv_version;

- if (fcopy_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- fcopy_transaction.fcopy_context = context;
+ if (fcopy_transaction.state > HVUTIL_READY)
return;
- }
- fcopy_transaction.fcopy_context = NULL;

vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -275,7 +266,8 @@ void hv_fcopy_onchannelcallback(void *context)
* Send the information to the user-level daemon.
*/
schedule_work(&fcopy_send_work);
- schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
+ schedule_delayed_work(&fcopy_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
@@ -304,9 +296,8 @@ static int fcopy_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
fcopy_respond_to_host(*val);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel,
+ fcopy_poll_wrapper);
}

return 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 74c38a9f34a6..2a3420c4ca59 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -66,7 +66,6 @@ static struct {
struct hv_kvp_msg *kvp_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *kvp_context; /* for the channel callback */
} kvp_transaction;

/*
@@ -94,6 +93,13 @@ static struct hvutil_transport *hvt;
*/
#define HV_DRV_VERSION "3.1"

+static void kvp_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ kvp_transaction.state = HVUTIL_READY;
+ hv_kvp_onchannelcallback(channel);
+}
+
static void
kvp_register(int reg_value)
{
@@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
*/
kvp_respond_to_host(NULL, HV_E_FAIL);

- /* Transaction is finished, reset the state. */
- if (kvp_transaction.state > HVUTIL_READY)
- kvp_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}

static int kvp_handle_handshake(struct hv_kvp_msg *msg)
@@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len)
*/
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, error);
- kvp_transaction.state = HVUTIL_READY;
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}

return 0;
@@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *context)
int util_fw_version;
int kvp_srv_version;

- if (kvp_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- kvp_transaction.kvp_context = context;
+ if (kvp_transaction.state > HVUTIL_READY)
return;
- }
- kvp_transaction.kvp_context = NULL;

vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -668,7 +660,8 @@ void hv_kvp_onchannelcallback(void *context)
* user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_timeout_work, 5*HZ);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);

return;

diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 815405f2e777..81882d4848bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -53,7 +53,6 @@ static struct {
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
- void *vss_context; /* for the channel callback */
} vss_transaction;


@@ -74,6 +73,13 @@ static void vss_timeout_func(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_send_op_work, vss_send_op);

+static void vss_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ vss_transaction.state = HVUTIL_READY;
+ hv_vss_onchannelcallback(channel);
+}
+
/*
* Callback when data is received from user mode.
*/
@@ -86,12 +92,7 @@ static void vss_timeout_func(struct work_struct *dummy)
pr_warn("VSS: timeout waiting for daemon to reply\n");
vss_respond_to_host(HV_E_FAIL);

- /* Transaction is finished, reset the state. */
- if (vss_transaction.state > HVUTIL_READY)
- vss_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
@@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(vss_msg->error);
/* Transaction is finished, reset the state. */
- vss_transaction.state = HVUTIL_READY;
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel,
+ vss_poll_wrapper);
}
} else {
/* This is a spurious call! */
@@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;

- if (vss_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- vss_transaction.vss_context = context;
+ if (vss_transaction.state > HVUTIL_READY)
return;
- }
- vss_transaction.vss_context = NULL;

vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -338,6 +331,11 @@ static void vss_on_reset(void)
int
hv_vss_init(struct hv_util_service *srv)
{
+ if (vmbus_proto_version < VERSION_WIN8_1) {
+ pr_warn("Integration service 'Backup (volume snapshot)'"
+ " not supported on this host version.\n");
+ return -ENOTSUPP;
+ }
recv_buffer = srv->recv_buffer;

/*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 6a9d80a5332d..1505ee6e6605 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -204,9 +204,12 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
goto out_unlock;
}
hvt->outmsg = kzalloc(len, GFP_KERNEL);
- memcpy(hvt->outmsg, msg, len);
- hvt->outmsg_len = len;
- wake_up_interruptible(&hvt->outmsg_q);
+ if (hvt->outmsg) {
+ memcpy(hvt->outmsg, msg, len);
+ hvt->outmsg_len = len;
+ wake_up_interruptible(&hvt->outmsg_q);
+ } else
+ ret = -ENOMEM;
out_unlock:
mutex_unlock(&hvt->outmsg_lock);
return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3782636562a1..12156db2e88e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,11 @@
#include <linux/hyperv.h>

/*
+ * Timeout for services such as KVP and fcopy.
+ */
+#define HV_UTIL_TIMEOUT 30
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
@@ -759,11 +764,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
if (!channel)
return;

- if (channel->target_cpu != smp_processor_id())
- smp_call_function_single(channel->target_cpu,
- cb, channel, true);
- else
- cb(channel);
+ smp_call_function_single(channel->target_cpu, cb, channel, true);
}

enum hvutil_device_state {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9b5440f6b3b4..509ed9731630 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -105,6 +105,7 @@ static struct notifier_block hyperv_panic_block = {
};

struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
@@ -603,23 +604,11 @@ static int vmbus_remove(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
- u32 relid = dev->channel->offermsg.child_relid;

if (child_device->driver) {
drv = drv_to_hv_drv(child_device->driver);
if (drv->remove)
drv->remove(dev);
- else {
- hv_process_channel_removal(dev->channel, relid);
- pr_err("remove not set for driver %s\n",
- dev_name(child_device));
- }
- } else {
- /*
- * We don't have a driver for this device; deal with the
- * rescind message by removing the channel.
- */
- hv_process_channel_removal(dev->channel, relid);
}

return 0;
@@ -654,7 +643,10 @@ static void vmbus_shutdown(struct device *child_device)
static void vmbus_device_release(struct device *device)
{
struct hv_device *hv_dev = device_to_hv_device(device);
+ struct vmbus_channel *channel = hv_dev->channel;

+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
kfree(hv_dev);

}
@@ -870,7 +862,7 @@ static int vmbus_bus_init(int irq)
on_each_cpu(hv_synic_init, NULL, 1);
ret = vmbus_connect();
if (ret)
- goto err_alloc;
+ goto err_connect;

if (vmbus_proto_version > VERSION_WIN7)
cpu_hotplug_disable();
@@ -888,6 +880,8 @@ static int vmbus_bus_init(int irq)

return 0;

+err_connect:
+ on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
@@ -1147,7 +1141,10 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t range_min, range_max, start, local_min, local_max;
const char *dev_n = dev_name(&device_obj->device);
u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
- int i;
+ int i, retval;
+
+ retval = -ENXIO;
+ down(&hyperv_mmio_lock);

for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
@@ -1184,13 +1181,17 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
for (; start + size - 1 <= local_max; start += align) {
*new = request_mem_region_exclusive(start, size,
dev_n);
- if (*new)
- return 0;
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
}
}
}

- return -ENXIO;
+exit:
+ up(&hyperv_mmio_lock);
+ return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);

diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 146eed70bdf4..ba947df5a8c7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state knl_cstates[] = {
+ {
+ .name = "C1-KNL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .name = "C6-KNL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .enter = NULL }
+};

/**
* intel_idle
@@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = {
.disable_promotion_to_c1e = true,
};

+static const struct idle_cpu idle_cpu_knl = {
+ .state_table = knl_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

@@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x57, idle_cpu_knl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);

- skb->truesize = SKB_TRUESIZE(skb->len);
-
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;

diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 2b2f9d66c2c7..aff42d5e2296 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -317,6 +317,19 @@ static struct usb_device_id xpad_table[] = {

MODULE_DEVICE_TABLE(usb, xpad_table);

+struct xpad_output_packet {
+ u8 data[XPAD_PKT_LEN];
+ u8 len;
+ bool pending;
+};
+
+#define XPAD_OUT_CMD_IDX 0
+#define XPAD_OUT_FF_IDX 1
+#define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF))
+#define XPAD_NUM_OUT_PACKETS (1 + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS))
+
struct usb_xpad {
struct input_dev *dev; /* input device interface */
struct usb_device *udev; /* usb device */
@@ -329,9 +342,13 @@ struct usb_xpad {
dma_addr_t idata_dma;

struct urb *irq_out; /* urb for interrupt out report */
+ bool irq_out_active; /* we must not use an active URB */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
- struct mutex odata_mutex;
+ spinlock_t odata_lock;
+
+ struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
+ int last_out_packet;

#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct xpad_led *led;
@@ -678,18 +695,71 @@ exit:
__func__, retval);
}

+/* Callers must hold xpad->odata_lock spinlock */
+static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *pkt, *packet = NULL;
+ int i;
+
+ for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
+ if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
+ xpad->last_out_packet = 0;
+
+ pkt = &xpad->out_packets[xpad->last_out_packet];
+ if (pkt->pending) {
+ dev_dbg(&xpad->intf->dev,
+ "%s - found pending output packet %d\n",
+ __func__, xpad->last_out_packet);
+ packet = pkt;
+ break;
+ }
+ }
+
+ if (packet) {
+ memcpy(xpad->odata, packet->data, packet->len);
+ xpad->irq_out->transfer_buffer_length = packet->len;
+ packet->pending = false;
+ return true;
+ }
+
+ return false;
+}
+
+/* Callers must hold xpad->odata_lock spinlock */
+static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
+{
+ int error;
+
+ if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
+ error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ if (error) {
+ dev_err(&xpad->intf->dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ return -EIO;
+ }
+
+ xpad->irq_out_active = true;
+ }
+
+ return 0;
+}
+
static void xpad_irq_out(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
struct device *dev = &xpad->intf->dev;
- int retval, status;
+ int status = urb->status;
+ int error;
+ unsigned long flags;

- status = urb->status;
+ spin_lock_irqsave(&xpad->odata_lock, flags);

switch (status) {
case 0:
/* success */
- return;
+ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
+ break;

case -ECONNRESET:
case -ENOENT:
@@ -697,19 +767,26 @@ static void xpad_irq_out(struct urb *urb)
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n",
__func__, status);
- return;
+ xpad->irq_out_active = false;
+ break;

default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n",
__func__, status);
- goto exit;
+ break;
}

-exit:
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
- __func__, retval);
+ if (xpad->irq_out_active) {
+ error = usb_submit_urb(urb, GFP_ATOMIC);
+ if (error) {
+ dev_err(dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ xpad->irq_out_active = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}

static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
@@ -728,7 +805,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
goto fail1;
}

- mutex_init(&xpad->odata_mutex);
+ spin_lock_init(&xpad->odata_lock);

xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
if (!xpad->irq_out) {
@@ -770,27 +847,57 @@ static void xpad_deinit_output(struct usb_xpad *xpad)

static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
int retval;

- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->data[0] = 0x08;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out presence first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);

- xpad->odata[0] = 0x08;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ return retval;
+}

- retval = usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+static int xpad_start_xbox_one(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ /* Xbox one controller needs to be initialized. */
+ packet->data[0] = 0x05;
+ packet->data[1] = 0x20;
+ packet->len = 2;
+ packet->pending = true;

- mutex_unlock(&xpad->odata_mutex);
+ /* Reset the sequence so we send out start packet first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);

return retval;
}
@@ -799,8 +906,11 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct usb_xpad *xpad = input_get_drvdata(dev);
+ struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX];
__u16 strong;
__u16 weak;
+ int retval;
+ unsigned long flags;

if (effect->type != FF_RUMBLE)
return 0;
@@ -808,69 +918,80 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
strong = effect->u.rumble.strong_magnitude;
weak = effect->u.rumble.weak_magnitude;

+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
switch (xpad->xtype) {
case XTYPE_XBOX:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x06;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator */
- xpad->odata[4] = 0x00;
- xpad->odata[5] = weak / 256; /* right actuator */
- xpad->irq_out->transfer_buffer_length = 6;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x06;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator */
+ packet->data[4] = 0x00;
+ packet->data[5] = weak / 256; /* right actuator */
+ packet->len = 6;
+ packet->pending = true;
break;

case XTYPE_XBOX360:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator? */
- xpad->odata[4] = weak / 256; /* right actuator? */
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->irq_out->transfer_buffer_length = 8;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator? */
+ packet->data[4] = weak / 256; /* right actuator? */
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->len = 8;
+ packet->pending = true;
break;

case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x01;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = strong / 256;
- xpad->odata[6] = weak / 256;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x01;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = strong / 256;
+ packet->data[6] = weak / 256;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;

case XTYPE_XBOXONE:
- xpad->odata[0] = 0x09; /* activate rumble */
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = 0x08; /* continuous effect */
- xpad->odata[4] = 0x00; /* simple rumble mode */
- xpad->odata[5] = 0x03; /* L and R actuator only */
- xpad->odata[6] = 0x00; /* TODO: LT actuator */
- xpad->odata[7] = 0x00; /* TODO: RT actuator */
- xpad->odata[8] = strong / 256; /* left actuator */
- xpad->odata[9] = weak / 256; /* right actuator */
- xpad->odata[10] = 0x80; /* length of pulse */
- xpad->odata[11] = 0x00; /* stop period of pulse */
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x09; /* activate rumble */
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = 0x08; /* continuous effect */
+ packet->data[4] = 0x00; /* simple rumble mode */
+ packet->data[5] = 0x03; /* L and R actuator only */
+ packet->data[6] = 0x00; /* TODO: LT actuator */
+ packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[8] = strong / 256; /* left actuator */
+ packet->data[9] = weak / 256; /* right actuator */
+ packet->data[10] = 0x80; /* length of pulse */
+ packet->data[11] = 0x00; /* stop period of pulse */
+ packet->len = 12;
+ packet->pending = true;
break;

default:
dev_dbg(&xpad->dev->dev,
"%s - rumble command sent to unsupported xpad type: %d\n",
__func__, xpad->xtype);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}

- return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+out:
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+ return retval;
}

static int xpad_init_ff(struct usb_xpad *xpad)
@@ -921,36 +1042,44 @@ struct xpad_led {
*/
static void xpad_send_led_command(struct usb_xpad *xpad, int command)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_LED_IDX];
+ unsigned long flags;
+
command %= 16;

- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);

switch (xpad->xtype) {
case XTYPE_XBOX360:
- xpad->odata[0] = 0x01;
- xpad->odata[1] = 0x03;
- xpad->odata[2] = command;
- xpad->irq_out->transfer_buffer_length = 3;
+ packet->data[0] = 0x01;
+ packet->data[1] = 0x03;
+ packet->data[2] = command;
+ packet->len = 3;
+ packet->pending = true;
break;
+
case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x08;
- xpad->odata[3] = 0x40 + command;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0x40 + command;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;
}

- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- mutex_unlock(&xpad->odata_mutex);
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}

/*
@@ -1048,13 +1177,8 @@ static int xpad_open(struct input_dev *dev)
if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
return -EIO;

- if (xpad->xtype == XTYPE_XBOXONE) {
- /* Xbox one controller needs to be initialized. */
- xpad->odata[0] = 0x05;
- xpad->odata[1] = 0x20;
- xpad->irq_out->transfer_buffer_length = 2;
- return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- }
+ if (xpad->xtype == XTYPE_XBOXONE)
+ return xpad_start_xbox_one(xpad);

return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index a159529f9d53..c5f1757ac61d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)

@@ -71,6 +72,7 @@ struct its_node {
struct list_head its_device_list;
u64 flags;
u32 ite_size;
+ int numa_node;
};

#define ITS_ITT_ALIGN SZ_256
@@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_data *d)
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);

+ /* lpi cannot be routed to a redistributor that is on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;

@@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void)
list_for_each_entry(its, &its_nodes, entry) {
u64 target;

+ /* avoid cross node collections and its mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
@@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);

/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
@@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}

+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_22375,
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
{
}
};
@@ -1470,6 +1514,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);

its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a54b339951a3..2a96ff6923f0 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -89,6 +89,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,

list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
+ lun->vlun.nr_free_blocks--;
}

return 0;
@@ -345,7 +346,7 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
if (!dev->ops->submit_io)
- return 0;
+ return -ENODEV;

/* Convert address space */
gennvm_generic_to_addr_mode(dev, rqd);
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 134e4faba482..a9859489acf6 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}

page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+ if (!page)
+ return -ENOMEM;

while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -427,7 +429,7 @@ static void rrpc_lun_gc(struct work_struct *work)
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;

- spin_lock(&lun->lock);
+ spin_lock(&rlun->lock);
while (nr_blocks_need > lun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +438,16 @@ static void rrpc_lun_gc(struct work_struct *work)
if (!rblock->nr_invalid_pages)
break;

+ gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+ if (!gcb)
+ break;
+
list_del_init(&rblock->prio);

BUG_ON(!block_is_full(rrpc, rblock));

pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
gcb->rrpc = rrpc;
gcb->rblk = rblock;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +456,7 @@ static void rrpc_lun_gc(struct work_struct *work)

nr_blocks_need--;
}
- spin_unlock(&lun->lock);
+ spin_unlock(&rlun->lock);

/* TODO: Hint that request queue can be started again */
}
@@ -650,11 +652,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
if (bio_data_dir(rqd->bio) == WRITE)
rrpc_end_io_write(rrpc, rrqd, laddr, npages);

+ bio_put(rqd->bio);
+
if (rrqd->flags & NVM_IOTYPE_GC)
return 0;

rrpc_unlock_rq(rrpc, rqd);
- bio_put(rqd->bio);

if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -841,6 +844,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
err = nvm_submit_io(rrpc->dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
+ bio_put(bio);
+ if (!(flags & NVM_IOTYPE_GC)) {
+ rrpc_unlock_rq(rrpc, rqd);
+ if (rqd->nr_pages > 1)
+ nvm_dev_dma_free(rrpc->dev,
+ rqd->ppa_list, rqd->dma_ppa_list);
+ }
return NVM_IO_ERR;
}

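
[Editor's aside, not part of the patch: the rrpc_lun_gc hunk above moves the fallible mempool_alloc() in front of list_del_init(), so a failed allocation leaves the block queued on the LUN's priority list instead of losing it. The general shape of that allocate-before-commit ordering, as a small self-contained C sketch:]

    #include <stdlib.h>
    #include <string.h>

    /* Do every step that can fail before mutating shared state; then
     * the failure path needs no rollback. */
    static int replace_value(char **slot, const char *src)
    {
        char *copy = malloc(strlen(src) + 1);   /* the fallible step */
        if (!copy)
            return -1;                          /* *slot is untouched */
        strcpy(copy, src);
        free(*slot);                            /* commit point */
        *slot = copy;
        return 0;
    }
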
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425a7270..3d5c0ba13181 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1818,7 +1818,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 292c9479bb75..310e4b8beae8 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -264,7 +264,7 @@ config DVB_MB86A16
config DVB_TDA10071
tristate "NXP TDA10071"
depends on DVB_CORE && I2C
- select REGMAP
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d11fd6ac2df0..5cefca95734e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
},
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
};

/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0f2391e1b43..7e4d3eea371b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -119,6 +119,18 @@
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}

/* ------------------------------------------------------------------------
* Driver specific constants.
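
[Editor's aside, not part of the patch: the four GUIDs added above differ only in their first four bytes; the remaining twelve are the standard UVC base GUID tail visible in every initializer. A sketch of a helper macro that captures this; UVC_BASE_GUID and UVC_FOURCC_GUID are invented names, the real kernel header spells each GUID out in full:]

    #define UVC_BASE_GUID \
        0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, \
        0x00, 0x38, 0x9b, 0x71

    #define UVC_FOURCC_GUID(a, b, c, d) { (a), (b), (c), (d), UVC_BASE_GUID }

    /* expands to the same initializer as UVC_GUID_FORMAT_Y8I above */
    static const unsigned char y8i_guid[16] =
        UVC_FOURCC_GUID('Y', '8', 'I', ' ');
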
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6982f603fadc..ab6f392d3504 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Werror -Wno-unused-const-variable
+ccflags-y := -Werror $(call cc-disable-warning, unused-const-variable)

cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 103baf0e0c5b..ea3eeb7011e1 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)

afu = cxl_pci_to_afu(dev);

- get_device(&afu->dev);
ctx = cxl_context_alloc();
if (IS_ERR(ctx)) {
rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
err_ctx:
kfree(ctx);
err_dev:
- put_device(&afu->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
if (ctx->status >= STARTED)
return -EBUSY;

- put_device(&ctx->afu->dev);
-
cxl_context_free(ctx);

return 0;
@@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,

if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
- get_pid(ctx->pid);
+ ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
kernel = false;
}

diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2faa1270d085..262b88eac414 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
spin_lock_init(&ctx->sste_lock);
ctx->afu = afu;
ctx->master = master;
- ctx->pid = NULL; /* Set in start work ioctl */
+ ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
ctx->mapping = mapping;

@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pe = i;
ctx->elem = &ctx->afu->spa[i];
ctx->pe_inserted = false;
+
+ /*
+ * take a ref on the afu so that it stays alive at-least till
+ * this context is reclaimed inside reclaim_ctx.
+ */
+ cxl_afu_get(afu);
return 0;
}

@@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
WARN_ON(cxl_detach_process(ctx) &&
cxl_adapter_link_ok(ctx->afu->adapter));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+
+ /* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
+ put_pid(ctx->glpid);
+
cxl_ctx_put();
return 0;
}
@@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->irq_bitmap)
kfree(ctx->irq_bitmap);

+ /* Drop ref to the afu device taken during cxl_context_init */
+ cxl_afu_put(ctx->afu);
+
kfree(ctx);
}

diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0cfb9c129f27..a521bc72cec2 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -403,6 +403,18 @@ struct cxl_afu {
bool enabled;
};

+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+ return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void cxl_afu_put(struct cxl_afu *afu)
+{
+ put_device(&afu->dev);
+}
+

struct cxl_irq_name {
struct list_head list;
@@ -433,6 +445,9 @@ struct cxl_context {
unsigned int sst_size, sst_lru;

wait_queue_head_t wq;
+ /* pid of the group leader associated with the pid */
+ struct pid *glpid;
+ /* use mm context associated with this pid for ds faults */
struct pid *pid;
spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
/* Only used in PR mode */
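
[Editor's aside, not part of the patch: cxl_afu_get()/cxl_afu_put() above are thin wrappers around get_device()/put_device(), and the rest of the series pairs them across the context lifetime (taken in cxl_context_init(), dropped in reclaim_ctx()). A toy refcount model of that pairing:]

    #include <assert.h>

    /* stand-in for the driver-core reference count on afu->dev */
    struct afu { int refs; };

    static struct afu *afu_get(struct afu *a) { a->refs++; return a; }
    static void afu_put(struct afu *a) { assert(a->refs-- > 0); }

    int main(void)
    {
        struct afu a = { .refs = 1 };   /* ref held by the bus/core */
        afu_get(&a);                    /* cxl_context_init() analogue */
        /* ... context lives; faults are resolved against the AFU ... */
        afu_put(&a);                    /* reclaim_ctx() analogue */
        assert(a.refs == 1);
        return 0;
    }
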
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 25a5418c55cb..81c3f75b7330 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid
+ * In case the task has exited we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid mm_struct associated with it. If a task with valid mm_struct
+ * is found the ctx->pid is updated to use the task struct for subsequent
+ * translations. In case no valid mm_struct is found in the task group to
+ * service the fault a NULL is returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+ struct task_struct *task = NULL;
+ struct mm_struct *mm = NULL;
+ struct pid *old_pid = ctx->pid;
+
+ if (old_pid == NULL) {
+ pr_warn("%s: Invalid context for pe=%d\n",
+ __func__, ctx->pe);
+ return NULL;
+ }
+
+ task = get_pid_task(old_pid, PIDTYPE_PID);
+
+ /*
+ * pid_alive may look racy but this saves us from costly
+ * get_task_mm when the task is a zombie. In worst case
+ * we may think a task is alive, which is about to die
+ * but get_task_mm will return NULL.
+ */
+ if (task != NULL && pid_alive(task))
+ mm = get_task_mm(task);
+
+ /* release the task struct that was taken earlier */
+ if (task)
+ put_task_struct(task);
+ else
+ pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+ __func__, pid_nr(old_pid), ctx->pe);
+
+ /*
+ * If we couldn't find the mm context then use the group
+ * leader to iterate over the task group and find a task
+ * that gives us mm_struct.
+ */
+ if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+ rcu_read_lock();
+ task = pid_task(ctx->glpid, PIDTYPE_PID);
+ if (task)
+ do {
+ mm = get_task_mm(task);
+ if (mm) {
+ ctx->pid = get_task_pid(task,
+ PIDTYPE_PID);
+ break;
+ }
+ task = next_thread(task);
+ } while (task && !thread_group_leader(task));
+ rcu_read_unlock();
+
+ /* check if we switched pid */
+ if (ctx->pid != old_pid) {
+ if (mm)
+ pr_devel("%s:pe=%i switch pid %i->%i\n",
+ __func__, ctx->pe, pid_nr(old_pid),
+ pid_nr(ctx->pid));
+ else
+ pr_devel("%s:Cannot find mm for pid=%i\n",
+ __func__, pid_nr(old_pid));
+
+ /* drop the reference to older pid */
+ put_pid(old_pid);
+ }
+ }
+
+ return mm;
+}
+
+
+
void cxl_handle_fault(struct work_struct *fault_work)
{
struct cxl_context *ctx =
container_of(fault_work, struct cxl_context, fault_work);
u64 dsisr = ctx->dsisr;
u64 dar = ctx->dar;
- struct task_struct *task = NULL;
struct mm_struct *mm = NULL;

if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

if (!ctx->kernel) {
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_handle_fault unable to get task %i\n",
- pid_nr(ctx->pid));
+
+ mm = get_mem_context(ctx);
+ /* indicates all the thread in task group have exited */
+ if (mm == NULL) {
+ pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+ __func__, ctx->pe, pid_nr(ctx->pid));
cxl_ack_ae(ctx);
return;
- }
- if (!(mm = get_task_mm(task))) {
- pr_devel("cxl_handle_fault unable to get mm %i\n",
- pid_nr(ctx->pid));
- cxl_ack_ae(ctx);
- goto out;
+ } else {
+ pr_devel("Handling page fault for pe=%d pid=%i\n",
+ ctx->pe, pid_nr(ctx->pid));
}
}

@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)

if (mm)
mmput(mm);
-out:
- if (task)
- put_task_struct(task);
}

static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
- int rc;
- struct task_struct *task;
struct mm_struct *mm;

- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_one unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_one unable to get mm %i\n",
pid_nr(ctx->pid));
- put_task_struct(task);
return;
}

- rc = cxl_fault_segment(ctx, mm, ea);
+ cxl_fault_segment(ctx, mm, ea);

mmput(mm);
- put_task_struct(task);
}

static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
struct copro_slb slb;
struct vm_area_struct *vma;
int rc;
- struct task_struct *task;
struct mm_struct *mm;

- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_vma unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_vm unable to get mm %i\n",
pid_nr(ctx->pid));
- goto out1;
+ return;
}

down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
up_read(&mm->mmap_sem);

mmput(mm);
-out1:
- put_task_struct(task);
}

void cxl_prefault(struct cxl_context *ctx, u64 wed)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 7ccd2998be92..783337d22f36 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
spin_unlock(&adapter->afu_list_lock);
goto err_put_adapter;
}
- get_device(&afu->dev);
+
+ /*
+ * taking a ref to the afu so that it doesn't go away
+ * for rest of the function. This ref is released before
+ * we return.
+ */
+ cxl_afu_get(afu);
spin_unlock(&adapter->afu_list_lock);

if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
file->private_data = ctx;
cxl_ctx_get();

- /* Our ref on the AFU will now hold the adapter */
- put_device(&adapter->dev);
-
- return 0;
+ /* indicate success */
+ rc = 0;

err_put_afu:
- put_device(&afu->dev);
+ /* release the ref taken earlier */
+ cxl_afu_put(afu);
err_put_adapter:
put_device(&adapter->dev);
return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
mutex_unlock(&ctx->mapping_lock);
}

- put_device(&ctx->afu->dev);
-
/*
* At this this point all bottom halfs have finished and we should be
* getting no more IRQs from the hardware for this context. Once it's
@@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
* that performs this ioctl and not the process that opened the file.
+ * Also we grab the PID of the group leader so that if the task that
+ * has performed the attach operation exits the mm context of the
+ * process is still accessible.
*/
- ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);

trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index be2c8e248e2e..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
{ PCI_DEVICE_CLASS(0x120000, ~0), },

{ }
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1a802af827ed..552a34dc4f82 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_buffer, host->align_buffer_sz, direction);
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
- BUG_ON(host->align_addr & host->align_mask);
+ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);

host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
@@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* the (up to three) bytes that screw up the
* alignment.
*/
- offset = (host->align_sz - (addr & host->align_mask)) &
- host->align_mask;
+ offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
+ SDHCI_ADMA2_MASK;
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
@@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,

BUG_ON(offset > 65536);

- align += host->align_sz;
- align_addr += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
+ align_addr += SDHCI_ADMA2_ALIGN;

desc += host->desc_sz;

@@ -611,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
/* Do a quick scan of the SG list for any unaligned mappings */
has_unaligned = false;
for_each_sg(data->sg, sg, host->sg_count, i)
- if (sg_dma_address(sg) & host->align_mask) {
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
has_unaligned = true;
break;
}
@@ -623,15 +623,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
align = host->align_buffer;

for_each_sg(data->sg, sg, host->sg_count, i) {
- if (sg_dma_address(sg) & host->align_mask) {
- size = host->align_sz -
- (sg_dma_address(sg) & host->align_mask);
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ size = SDHCI_ADMA2_ALIGN -
+ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

buffer = sdhci_kmap_atomic(sg, &flags);
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);

- align += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
}
}
}
@@ -1315,7 +1315,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
pwr = SDHCI_POWER_330;
break;
default:
- BUG();
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ break;
}
}

@@ -2983,24 +2985,17 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & SDHCI_USE_64_BIT_DMA) {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_64_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_64_ALIGN;
host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_64_ALIGN;
- host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
} else {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_32_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_32_ALIGN;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_32_ALIGN;
- host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
}
host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
host->adma_table_sz,
&host->adma_addr,
GFP_KERNEL);
+ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
if (host->adma_table)
@@ -3014,7 +3009,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
host->adma_table = NULL;
host->align_buffer = NULL;
- } else if (host->adma_addr & host->align_mask) {
+ } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 9c331ac5ad6b..0115e9907bf8 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -272,22 +272,27 @@
/* ADMA2 32-bit DMA descriptor size */
#define SDHCI_ADMA2_32_DESC_SZ 8

-/* ADMA2 32-bit DMA alignment */
-#define SDHCI_ADMA2_32_ALIGN 4
-
/* ADMA2 32-bit descriptor */
struct sdhci_adma2_32_desc {
__le16 cmd;
__le16 len;
__le32 addr;
-} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
+} __packed __aligned(4);
+
+/* ADMA2 data alignment */
+#define SDHCI_ADMA2_ALIGN 4
+#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
+
+/*
+ * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte
+ * alignment for the descriptor table even in 32-bit DMA mode. Memory
+ * allocation is at least 8 byte aligned anyway, so just stipulate 8 always.
+ */
+#define SDHCI_ADMA2_DESC_ALIGN 8

/* ADMA2 64-bit DMA descriptor size */
#define SDHCI_ADMA2_64_DESC_SZ 12

-/* ADMA2 64-bit DMA alignment */
-#define SDHCI_ADMA2_64_ALIGN 8
-
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
* aligned.
@@ -483,8 +488,6 @@ struct sdhci_host {
dma_addr_t align_addr; /* Mapped bounce buffer */

unsigned int desc_sz; /* ADMA descriptor size */
- unsigned int align_sz; /* ADMA alignment */
- unsigned int align_mask; /* ADMA alignment mask */

struct tasklet_struct finish_tasklet; /* Tasklet structures */

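
[Editor's aside, not part of the patch: with SDHCI_ADMA2_ALIGN a power of two, the expression used in sdhci_adma_table_pre() above, (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & SDHCI_ADMA2_MASK, yields the byte count needed to reach the next 4-byte boundary and folds the already-aligned case to 0. A checkable demonstration:]

    #include <assert.h>
    #include <stdint.h>

    #define SDHCI_ADMA2_ALIGN 4
    #define SDHCI_ADMA2_MASK  (SDHCI_ADMA2_ALIGN - 1)

    /* bytes needed to bring addr up to the next 4-byte boundary */
    static unsigned int align_offset(uint64_t addr)
    {
        return (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
               SDHCI_ADMA2_MASK;
    }

    int main(void)
    {
        assert(align_offset(0x1000) == 0);  /* already aligned */
        assert(align_offset(0x1001) == 3);
        assert(align_offset(0x1003) == 1);
        return 0;
    }
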
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405b72..7445da218bd9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octeon_device *oct,

octeon_swap_8B_data(&resp->timestamp, 1);

- if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
struct skb_shared_hwtstamps ts;
u64 ns = resp->timestamp;

diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 39ca6744a4e6..22471d283a95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9

+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of CQ descriptor. An errata in HW mandates this value to be
+ * atleast 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 5f24d11cb16a..16baaafed26c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
static void nic_init_hw(struct nicpf *nic)
{
int i;
+ u64 cqm_cfg;

/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Check if HW expected value is higher (could be in future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}

/* Channel parse index configuration */
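
[Editor's aside, not part of the patch: the nic_init_hw() hunk above is a read-compare-write floor, raising NIC_PF_CQM_CFG to the errata minimum while leaving a larger value from a future chip untouched. The same pattern over a fake register file:]

    #include <assert.h>
    #include <stdint.h>

    static uint64_t regs[16];   /* stand-in for MMIO registers */

    static void enforce_floor(unsigned int reg, uint64_t min_val)
    {
        if (regs[reg] < min_val)
            regs[reg] = min_val;
    }

    int main(void)
    {
        regs[0] = 0x80;             /* below the 0x100 errata minimum */
        enforce_floor(0, 0x100);
        assert(regs[0] == 0x100);

        regs[1] = 0x200;            /* hypothetical newer chip default */
        enforce_floor(1, 0x100);
        assert(regs[1] == 0x200);   /* left alone */
        return 0;
    }
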
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
-#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CQM_CFG (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index dde8dc720cd3..b7093b9cd1e8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cmp_queue *cq,
- struct cqe_rx_t *cqe_rx, int cqe_type)
+ struct cqe_rx_t *cqe_rx)
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
@@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
}

/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
if (err && !cqe_rx->rb_cnt)
return;

@@ -674,8 +673,7 @@ loop:
cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq,
- cq_desc, CQE_TYPE_RX);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -1117,7 +1115,6 @@ int nicvf_stop(struct net_device *netdev)

/* Clear multiqset info */
nic->pnicvf = nic;
- nic->sqs_count = 0;

return 0;
}
@@ -1346,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *nic)
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
drv_stats->rx_drops = stats->rx_drop_red +
stats->rx_drop_overrun;
drv_stats->tx_drops = stats->tx_drops;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d1c217eaf417..912ee28ab58b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1414,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
}

/* Check for errors in the receive cmp.queue entry */
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

- if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- drv_stats->rx_frames_ok++;
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
- }

if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 033e8306e91c..5652c612e20b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9df26c2263bc..42718cc7d4e8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}

/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}

- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}

- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}

static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;

lmac = container_of(work, struct lmac, dwork.work);

@@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);

- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}

if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}

queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;

lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}

- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);

if ((bgx->lmac_type != BGX_MODE_XFI) &&
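
[Editor's aside, not part of the patch: the new bgx_lmac_disable() sequence stops reception, polls BGX_CMRX_RX_FIFO_LEN/BGX_CMRX_TX_FIFO_LEN until the FIFOs drain, then stops transmission and finally the LMAC. A rough userspace sketch of such a bounded drain poll; the polarity flag and the timing of the real bgx_poll_reg() are not modelled here:]

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool poll_bits_clear(const volatile uint64_t *reg, uint64_t mask,
                                unsigned int tries)
    {
        while (tries--) {
            if ((*reg & mask) == 0)
                return true;    /* drained */
            /* a driver would sleep or cpu_relax() here */
        }
        return false;           /* timed out */
    }

    int main(void)
    {
        volatile uint64_t fifo_len = 0;   /* pretend MMIO register */
        assert(poll_bits_clear(&fifo_len, 0x1FFF, 10));
        fifo_len = 0x40;                  /* stuck data */
        assert(!poll_bits_clear(&fifo_len, 0x1FFF, 10));
        return 0;
    }
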
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 69707108d23c..98fe5a2cd6e3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -213,8 +213,11 @@ struct e1000_rx_ring {
};

#define E1000_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) \
- ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+({ \
+ unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
+ unsigned int use = READ_ONCE((R)->next_to_use); \
+ (clean > use ? 0 : (R)->count) + clean - use - 1; \
+})

#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
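
[Editor's aside, not part of the patch: stripped of the new barriers, the E1000_DESC_UNUSED arithmetic above counts free ring slots while reserving one slot, so next_to_use == next_to_clean always means "empty". Worked through as plain C:]

    #include <assert.h>

    static unsigned int desc_unused(unsigned int clean, unsigned int use,
                                    unsigned int count)
    {
        return (clean > use ? 0 : count) + clean - use - 1;
    }

    int main(void)
    {
        assert(desc_unused(0, 0, 256) == 255);   /* empty ring */
        assert(desc_unused(10, 9, 256) == 0);    /* completely full */
        assert(desc_unused(5, 200, 256) == 60);  /* wrapped indices */
        return 0;
    }
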
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index fd7be860c201..068023595d84 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}

- tx_ring->next_to_clean = i;
+ /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
+ * which will reuse the cleaned buffers.
+ */
+ smp_store_release(&tx_ring->next_to_clean, i);

netdev_completed_queue(netdev, pkts_compl, bytes_compl);

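
[Editor's aside, not part of the patch: the smp_store_release() above pairs with the smp_load_acquire() added to E1000_DESC_UNUSED, so the transmit path cannot observe the advanced next_to_clean before the buffer cleanup that preceded it. The same pairing expressed in portable C11 atomics:]

    #include <assert.h>
    #include <stdatomic.h>

    static _Atomic unsigned int next_to_clean;
    static unsigned int buffers_recycled;

    static void clean_irq(unsigned int i)
    {
        buffers_recycled = i;  /* ordered before the release store */
        atomic_store_explicit(&next_to_clean, i, memory_order_release);
    }

    static unsigned int xmit_frame(void)
    {
        /* pairs with the release store; the recycle is visible here */
        return atomic_load_explicit(&next_to_clean, memory_order_acquire);
    }

    int main(void)
    {
        clean_irq(42);
        assert(xmit_frame() == 42 && buffers_recycled == 42);
        return 0;
    }
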
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0a854a47d31a..80ec587d510e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
* previous interrupt.
*/
if (rx_ring->set_itr) {
- writel(1000000000 / (rx_ring->itr_val * 256),
- rx_ring->itr_register);
+ u32 itr = rx_ring->itr_val ?
+ 1000000000 / (rx_ring->itr_val * 256) : 0;
+
+ writel(itr, rx_ring->itr_register);
rx_ring->set_itr = 0;
}

diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 14440200499b..48809e5d3f79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -33,7 +33,7 @@
#include "fm10k_pf.h"
#include "fm10k_vf.h"

-#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
+#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */

#define MAX_QUEUES FM10K_MAX_QUEUES_PF

diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e76a44cf330c..09281558bfbc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
fm10k_for_each_ring(ring, q_vector->tx)
clean_complete &= fm10k_clean_tx_irq(q_vector, ring);

+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ return budget;
+
/* attempt to distribute budget to each queue fairly, but don't
* allow the budget to go below 1 because we'll exit polling
*/
@@ -1966,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)

/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
- if (err)
+ if (err) {
+ fm10k_reset_msix_capability(interface);
return err;
+ }

/* Map rings to devices, and map devices to physical queues */
fm10k_assign_rings(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 74be792f3f1b..7f3fb51bc37b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -159,13 +159,30 @@ static void fm10k_reinit(struct fm10k_intfc *interface)

fm10k_mbx_free_irq(interface);

+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);

/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
- if (err)
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ goto reinit_err;
+ }

/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
@@ -193,6 +210,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)

fm10k_iov_resume(interface->pdev);

+reinit_err:
+ if (err)
+ netif_device_detach(netdev);
+
rtnl_unlock();

clear_bit(__FM10K_RESETTING, &interface->state);
@@ -1101,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
int itr_reg;

+ /* no mailbox IRQ to free if MSI-X is not enabled */
+ if (!interface->msix_entries)
+ return;
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);

@@ -1423,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
err = fm10k_mbx_request_irq_pf(interface);
else
err = fm10k_mbx_request_irq_vf(interface);
+ if (err)
+ return err;

/* connect mailbox */
- if (!err)
- err = hw->mbx.ops.connect(hw, &hw->mbx);
+ err = hw->mbx.ops.connect(hw, &hw->mbx);
+
+ /* if the mailbox failed to connect, then free IRQ */
+ if (err)
+ fm10k_mbx_free_irq(interface);

return err;
}
@@ -1684,7 +1714,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->last_reset = jiffies + (10 * HZ);

/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
+ return err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
if (err) {
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
@@ -2071,8 +2107,10 @@ static int fm10k_resume(struct pci_dev *pdev)

/* reset hardware to known state */
err = hw->mac.ops.init_hw(&interface->hw);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
+ }

/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
@@ -2185,6 +2223,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
fm10k_close(netdev);

+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
fm10k_mbx_free_irq(interface);

pci_disable_device(pdev);
@@ -2248,11 +2289,21 @@ static void fm10k_io_resume(struct pci_dev *pdev)
int err = 0;

/* reset hardware to known state */
- hw->mac.ops.init_hw(&interface->hw);
+ err = hw->mac.ops.init_hw(&interface->hw);
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+ return;
+ }

/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ return;
+ }
+
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);

diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 318a212f0a78..35afd711d144 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -77,6 +77,7 @@ struct fm10k_hw;
#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10

#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
#define FM10K_ERR_REQUESTS_PENDING -4
#define FM10K_ERR_RESET_REQUESTED -5
#define FM10K_ERR_DMA_PENDING -6
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 36c8b0aa08fd..d512575c33f3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -103,7 +103,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
s32 err;
u16 i;

- /* assume we always have at least 1 queue */
+ /* verify we have at least 1 queue */
+ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
+ !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
+ err = FM10K_ERR_NO_RESOURCES;
+ goto reset_max_queues;
+ }
+
+ /* determine how many queues we have */
for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
/* verify the Descriptor cache offsets are increasing */
tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
@@ -119,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_disable_queues_generic(hw, i);
if (err)
- return err;
+ goto reset_max_queues;

/* record maximum queue count */
hw->mac.max_queues = i;
@@ -129,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;

return 0;
+
+reset_max_queues:
+ hw->mac.max_queues = 0;
+
+ return err;
}

/* This structure defines the attibutes to be parsed below */
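
[Editor's aside, not part of the patch: the !~fm10k_read_reg(...) test above appears to rely on the PCIe behaviour that reads from an absent or unassigned function return all ones, together with the C identity that !~x is true only for x == 0xFFFFFFFF. Demonstrated:]

    #include <assert.h>
    #include <stdint.h>

    /* !~x is non-zero only when every bit of x is set, i.e. the
     * all-ones pattern a dead PCIe read returns */
    static int reg_is_all_ones(uint32_t x)
    {
        return !~x;
    }

    int main(void)
    {
        assert(reg_is_all_ones(0xFFFFFFFFu));   /* dead read */
        assert(!reg_is_all_ones(0x00000000u));
        assert(!reg_is_all_ones(0x12345678u));
        return 0;
    }
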
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4dd3e26129b4..7e258a83ccab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3f385ffe420f..488a50d59dca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
@@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
@@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
@@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a9873ec28c7..2215bebe208e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}

/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f = NULL;
+ int changed = 0;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+ "Missing mac_filter_list_lock\n");
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (is_vf == f->is_vf) &&
+ (is_netdev == f->is_netdev)) {
+ f->counter--;
+ f->changed = true;
+ changed = 1;
+ }
+ }
+ if (changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1547,9 +1583,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}

- i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
return 0;
}

@@ -1935,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)

/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
+ int del_list_size;
+
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
+ del_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kzalloc(del_list_size, GFP_KERNEL);
if (!del_list) {
i40e_cleanup_add_list(&tmp_add_list);

@@ -1971,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
NULL);
aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, sizeof(*del_list));
+ memset(del_list, 0, del_list_size);

if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_err(&pf->pdev->dev,
@@ -2004,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}

if (!list_empty(&tmp_add_list)) {
+ int add_list_size;

/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- GFP_KERNEL);
+ add_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(add_list_size, GFP_KERNEL);
if (!add_list) {
/* Purge element from temporary lists */
i40e_cleanup_add_list(&tmp_add_list);
@@ -2048,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)

if (ret)
break;
- memset(add_list, 0, sizeof(*add_list));
+ memset(add_list, 0, add_list_size);
}
/* Entries from tmp_add_list were cloned from MAC
* filter list, hence clean those cloned entries
@@ -2112,12 +2153,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- if (grab_rtnl)
- i40e_do_reset_safe(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
- else
- i40e_do_reset(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
} else {
ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2377,16 +2413,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}

- /* Make sure to release before sync_vsi_filter because that
- * function will lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);

- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}

/**
@@ -2459,16 +2492,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}

- /* Make sure to release before sync_vsi_filter because that
- * function with lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);

- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}

/**
@@ -2711,6 +2741,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
free_cpumask_var(mask);
}
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
}

/**
@@ -6685,6 +6720,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
i40e_status ret;
+ u32 val;
u32 v;

/* Now we wait for GRST to settle out.
@@ -6823,6 +6859,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}

+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+#define I40E_REG_MSS 0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS 0x400000
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
@@ -10183,6 +10233,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err;
u32 len;
+ u32 val;
u32 i;
u8 set_fc_aq_fail;

@@ -10493,6 +10544,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 635b3ac17877..26c55bba4bf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}

@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}

+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}

@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}

+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}

|
|
{
|
|
if (tx_buffer->skb) {
|
|
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
|
|
- kfree(tx_buffer->raw_buf);
|
|
- else
|
|
- dev_kfree_skb_any(tx_buffer->skb);
|
|
-
|
|
+ dev_kfree_skb_any(tx_buffer->skb);
|
|
if (dma_unmap_len(tx_buffer, len))
|
|
dma_unmap_single(ring->dev,
|
|
dma_unmap_addr(tx_buffer, dma),
|
|
@@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
|
|
dma_unmap_len(tx_buffer, len),
|
|
DMA_TO_DEVICE);
|
|
}
|
|
+
|
|
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
|
|
+ kfree(tx_buffer->raw_buf);
|
|
+
|
|
tx_buffer->next_to_watch = NULL;
|
|
tx_buffer->skb = NULL;
|
|
dma_unmap_len_set(tx_buffer, len, 0);
|
|
@@ -1416,31 +1425,12 @@ checksum_fail:
|
|
}
|
|
|
|
/**
|
|
- * i40e_rx_hash - returns the hash value from the Rx descriptor
|
|
- * @ring: descriptor ring
|
|
- * @rx_desc: specific descriptor
|
|
- **/
|
|
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
|
|
- union i40e_rx_desc *rx_desc)
|
|
-{
|
|
- const __le64 rss_mask =
|
|
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
|
|
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
|
|
-
|
|
- if ((ring->netdev->features & NETIF_F_RXHASH) &&
|
|
- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
|
|
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
|
|
- else
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/**
|
|
- * i40e_ptype_to_hash - get a hash type
|
|
+ * i40e_ptype_to_htype - get a hash type
|
|
* @ptype: the ptype value from the descriptor
|
|
*
|
|
* Returns a hash type to be used by skb_set_hash
|
|
**/
|
|
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
|
|
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
|
|
{
|
|
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
|
|
|
|
@@ -1458,6 +1448,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
|
|
}
|
|
|
|
/**
|
|
+ * i40e_rx_hash - set the hash value in the skb
|
|
+ * @ring: descriptor ring
|
|
+ * @rx_desc: specific descriptor
|
|
+ **/
|
|
+static inline void i40e_rx_hash(struct i40e_ring *ring,
|
|
+ union i40e_rx_desc *rx_desc,
|
|
+ struct sk_buff *skb,
|
|
+ u8 rx_ptype)
|
|
+{
|
|
+ u32 hash;
|
|
+ const __le64 rss_mask =
|
|
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
|
|
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
|
|
+
|
|
+ if (ring->netdev->features & NETIF_F_RXHASH)
|
|
+ return;
|
|
+
|
|
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
|
|
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
|
|
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
* i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
|
|
* @rx_ring: rx ring to clean
|
|
* @budget: how many cleans we're allowed
|
|
@@ -1606,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|
continue;
|
|
}
|
|
|
|
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
|
|
- i40e_ptype_to_hash(rx_ptype));
|
|
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
|
|
+
|
|
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
|
|
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
|
|
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
|
|
@@ -1736,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|
continue;
|
|
}
|
|
|
|
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
|
|
- i40e_ptype_to_hash(rx_ptype));
|
|
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
|
|
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
|
|
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
|
|
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 44462b40f2d7..e116d9a99b8e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 		i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
 	spin_lock_bh(&vsi->mac_filter_list_lock);
-	f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-			    vf->port_vlan_id ? vf->port_vlan_id : -1,
-			    true, false);
-	if (!f)
-		dev_info(&pf->pdev->dev,
-			 "Could not allocate VF MAC addr\n");
+	if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+				    vf->port_vlan_id ? vf->port_vlan_id : -1,
+				    true, false);
+		if (!f)
+			dev_info(&pf->pdev->dev,
+				 "Could not add MAC filter %pM for VF %d\n",
+				 vf->default_lan_addr.addr, vf->vf_id);
+	}
 	f = i40e_add_filter(vsi, brdcast,
 			    vf->port_vlan_id ? vf->port_vlan_id : -1,
 			    true, false);
@@ -1680,8 +1683,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	spin_lock_bh(&vsi->mac_filter_list_lock);
 	/* delete addresses from the list */
 	for (i = 0; i < al->num_elements; i++)
-		i40e_del_filter(vsi, al->list[i].addr,
-				I40E_VLAN_ANY, true, false);
+		if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
+			goto error_param;
+		}
+
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 47e9a90d6b10..39db70a597ed 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
-	/* check to see if there are any non-cache aligned descriptors
-	 * waiting to be written back, and kick the hardware to force
-	 * them to be written back in case of napi polling
-	 */
-	if (budget &&
-	    !((i & WB_STRIDE) == WB_STRIDE) &&
-	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-		tx_ring->arm_wb = true;
-
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
 				  total_packets, total_bytes);
@@ -889,31 +879,12 @@ checksum_fail:
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring:  descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-			       union i40e_rx_desc *rx_desc)
-{
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if ((ring->netdev->features & NETIF_F_RXHASH) &&
-	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	else
-		return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -931,6 +902,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (ring->netdev->features & NETIF_F_RXHASH)
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
+/**
  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring:  rx ring to clean
  * @budget:   how many cleans we're allowed
@@ -1071,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
@@ -1189,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
@@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
+	u16 desc_count = 0;
+	bool tail_bump = true;
+	bool do_rs = false;
 
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-	/* Place RS bit on last descriptor of any packet that spans across the
-	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-	 */
 #define WB_STRIDE 0x3
-	if (((i & WB_STRIDE) != WB_STRIDE) &&
-	    (first <= &tx_ring->tx_bi[i]) &&
-	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-				    I40E_TXD_QW1_CMD_SHIFT);
-	} else {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TXD_CMD <<
-				    I40E_TXD_QW1_CMD_SHIFT);
-	}
-
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
 	/* set next_to_watch value indicating a packet is present */
 	first->next_to_watch = tx_desc;
 
@@ -1881,15 +1854,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+			     first->bytecount);
 	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	/* Algorithm to optimize tail and RS bit setting:
+	 * if xmit_more is supported
+	 *	if xmit_more is true
+	 *		do not update tail and do not mark RS bit.
+	 *	if xmit_more is false and last xmit_more was false
+	 *		if every packet spanned less than 4 desc
+	 *			then set RS bit on 4th packet and update tail
+	 *			on every packet
+	 *		else
+	 *			update tail and set RS bit on every packet.
+	 *	if xmit_more is false and last_xmit_more was true
+	 *		update tail and set RS bit.
+	 * else (kernel < 3.18)
+	 *	if every packet spanned less than 4 desc
+	 *		then set RS bit on 4th packet and update tail
+	 *		on every packet
+	 *	else
+	 *		set RS bit on EOP for every packet and update tail
+	 *
+	 * Optimization: wmb to be issued only in case of tail update.
+	 * Also optimize the Descriptor WB path for RS bit with the same
+	 * algorithm.
+	 *
+	 * Note: If there are less than 4 packets
+	 * pending and interrupts were disabled the service task will
+	 * trigger a force WB.
+	 */
+	if (skb->xmit_more  &&
+	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						    tx_ring->queue_index))) {
+		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		tail_bump = false;
+	} else if (!skb->xmit_more &&
+		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						       tx_ring->queue_index)) &&
+		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+		   (tx_ring->packet_stride < WB_STRIDE) &&
+		   (desc_count < WB_STRIDE)) {
+		tx_ring->packet_stride++;
+	} else {
+		tx_ring->packet_stride = 0;
+		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		do_rs = true;
+	}
+	if (do_rs)
+		tx_ring->packet_stride = 0;
+
+	tx_desc->cmd_type_offset_bsz =
+			build_ctob(td_cmd, td_offset, size, td_tag) |
+			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+						  I40E_TX_DESC_CMD_EOP) <<
+						  I40E_TXD_QW1_CMD_SHIFT);
+
 	/* notify HW of packet */
-	if (!skb->xmit_more ||
-	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						   tx_ring->queue_index)))
-		writel(i, tx_ring->tail);
-	else
+	if (!tail_bump)
 		prefetchw(tx_desc + 1);
 
+	if (tail_bump) {
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, tx_ring->tail);
+	}
+
 	return;
 
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ebc1bf77f036..998976844e4e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -267,6 +267,8 @@ struct i40e_ring {
 
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
+	u8 packet_stride;
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 4790437a50ac..2ac62efc36f7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
 
 	switch (nfc->flow_type) {
 	case TCP_V4_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-			break;
-		default:
+		else
 			return -EINVAL;
-		}
 		break;
 	case TCP_V6_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-			break;
-		default:
+		else
 			return -EINVAL;
-		}
 		break;
 	case UDP_V4_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-			break;
-		default:
+		} else {
 			return -EINVAL;
 		}
 		break;
 	case UDP_V6_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-			break;
-		default:
+		} else {
 			return -EINVAL;
 		}
 		break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 99d2cffae0cd..5f03ab3dfa19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 {
 	int i;
 
+	if (!adapter->tx_rings)
+		return;
+
 	for (i = 0; i < adapter->num_active_queues; i++)
 		if (adapter->tx_rings[i]->desc)
 			i40evf_free_tx_resources(adapter->tx_rings[i]);
@@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 {
 	int i;
 
+	if (!adapter->rx_rings)
+		return;
+
 	for (i = 0; i < adapter->num_active_queues; i++)
 		if (adapter->rx_rings[i]->desc)
 			i40evf_free_rx_resources(adapter->rx_rings[i]);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 32e620e1eb5c..5de3f52fd31f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_ether_addr_list *veal;
 	int len, i = 0, count = 0;
 	struct i40evf_mac_filter *f;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+		      (count * sizeof(struct i40e_virtchnl_ether_addr));
+		more = true;
 	}
 
 	veal = kzalloc(len, GFP_ATOMIC);
@@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_ether_addr_list *veal;
 	struct i40evf_mac_filter *f, *ftmp;
 	int len, i = 0, count = 0;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+		      (count * sizeof(struct i40e_virtchnl_ether_addr));
+		more = true;
 	}
 	veal = kzalloc(len, GFP_ATOMIC);
 	if (!veal)
@@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_vlan_filter_list *vvfl;
 	int len, i = 0, count = 0;
 	struct i40evf_vlan_filter *f;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
 	}
 	vvfl = kzalloc(len, GFP_ATOMIC);
 	if (!vvfl)
@@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
@@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_vlan_filter_list *vvfl;
 	struct i40evf_vlan_filter *f, *ftmp;
 	int len, i = 0, count = 0;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
 	}
 	vvfl = kzalloc(len, GFP_ATOMIC);
 	if (!vvfl)
@@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7a73510e547c..97bf0c3d5c69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 	case I210_I_PHY_ID:
 		phy->type		= e1000_phy_i210;
 		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_cfg_done	= igb_get_cfg_done_i210;
 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
 		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 65d931669f81..29f59c76878a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
 	wr32(E1000_MDICNFG, mdicnfg);
 	return ret_val;
 }
+
+/**
+ *  igb_get_cfg_done_i210 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so an error is *ONLY* logged and returns
+ *  0.  If we were to return with error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	while (timeout) {
+		if (rd32(E1000_EEMNGCTL_I210) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout)
+		hw_dbg("MNG configuration cycle has not completed.\n");
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 3442b6357d01..eaa68a50cb3b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE		0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 4af2870e49f8..0fdcd4d1b982 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x12030  /* MNG EEprom Control */
 #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 1a2f1cc44b28..e3cb93bdb21a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -389,6 +389,8 @@ struct igb_adapter {
 	u16 link_speed;
 	u16 link_duplex;
 
+	u8 __iomem *io_addr; /* Mainly for iounmap use */
+
 	struct work_struct reset_task;
 	struct work_struct watchdog_task;
 	bool fc_autoneg;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea7b09887245..fa3b4cbea23b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	err = -EIO;
-	hw->hw_addr = pci_iomap(pdev, 0, 0);
-	if (!hw->hw_addr)
+	adapter->io_addr = pci_iomap(pdev, 0, 0);
+	if (!adapter->io_addr)
 		goto err_ioremap;
+	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
+	hw->hw_addr = adapter->io_addr;
 
 	netdev->netdev_ops = &igb_netdev_ops;
 	igb_set_ethtool_ops(netdev);
@@ -2656,7 +2658,7 @@ err_sw_init:
 #ifdef CONFIG_PCI_IOV
 	igb_disable_sriov(pdev);
 #endif
-	pci_iounmap(pdev, hw->hw_addr);
+	pci_iounmap(pdev, adapter->io_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
@@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *pdev)
 
 	igb_clear_interrupt_scheme(adapter);
 
-	pci_iounmap(pdev, hw->hw_addr);
+	pci_iounmap(pdev, adapter->io_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 	pci_release_selected_regions(pdev,
@@ -2856,6 +2858,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
 		return;
 
+	/* Of the below we really only want the effect of getting
+	 * IGB_FLAG_HAS_MSIX set (if available), without which
+	 * igb_enable_sriov() has no effect.
+	 */
+	igb_set_interrupt_capability(adapter, true);
+	igb_reset_interrupt_capability(adapter);
+
 	pci_sriov_set_totalvfs(pdev, 7);
 	igb_enable_sriov(pdev, max_vfs);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b23d..cd9b284bc83b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 	ixgbe_for_each_ring(ring, q_vector->tx)
 		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
-	if (!ixgbe_qv_lock_napi(q_vector))
+	/* Exit if we are called by netpoll or busy polling is active */
+	if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
 		return budget;
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2e022e900939..7cc9df717323 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
+	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+		return -ENOTSUPP;
+
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
 	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
@@ -416,11 +419,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int tc;
 	int i;
 
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		return -ENOTSUPP;
+
+	mutex_lock(&priv->state_lock);
 	priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
 	priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
 	priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
 	priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
 
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
+
 	for (i = 0; i < priv->params.num_channels; ++i) {
 		c = priv->channel[i];
 
@@ -436,6 +446,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 						coal->rx_max_coalesced_frames);
 	}
 
+out:
+	mutex_unlock(&priv->state_lock);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbd17e25beeb..90e876ecc720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_cq;
 
-	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
-					     moderation_usecs,
-					     moderation_frames);
-	if (err)
-		goto err_destroy_cq;
-
+	if (MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+					       moderation_usecs,
+					       moderation_frames);
 	return 0;
 
 err_destroy_cq:
@@ -1963,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_warn(mdev, "CQ modiration is not supported\n");
 
 	return 0;
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 289a5df0d44a..c851bc53831c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2725,7 +2725,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	return 0;
 
 disable:
-	pci_release_regions(pdev);
+	pci_disable_device(pdev);
 
 	return result;
 }
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index f9dfc8b6407a..7225ac6b3df5 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -80,7 +80,6 @@ struct fsl_pwm_chip {
 
 	struct mutex lock;
 
-	unsigned int use_count;
 	unsigned int cnt_select;
 	unsigned int clk_ps;
 
@@ -300,9 +299,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
 {
 	int ret;
 
-	if (fpc->use_count++ != 0)
-		return 0;
-
 	/* select counter clock source */
 	regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
 			   FTM_SC_CLK(fpc->cnt_select));
@@ -334,25 +330,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 	return ret;
 }
 
-static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
-{
-	/*
-	 * already disabled, do nothing
-	 */
-	if (fpc->use_count == 0)
-		return;
-
-	/* there are still users, so can't disable yet */
-	if (--fpc->use_count > 0)
-		return;
-
-	/* no users left, disable PWM counter clock */
-	regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
-
-	clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
-	clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
-}
-
 static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
 	struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
@@ -362,7 +339,8 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 	regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
 			   BIT(pwm->hwpwm));
 
-	fsl_counter_clock_disable(fpc);
+	clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+	clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
 
 	regmap_read(fpc->regmap, FTM_OUTMASK, &val);
 	if ((val & 0xFF) == 0xFF)
@@ -492,17 +470,24 @@ static int fsl_pwm_remove(struct platform_device *pdev)
 static int fsl_pwm_suspend(struct device *dev)
 {
 	struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
-	u32 val;
+	int i;
 
 	regcache_cache_only(fpc->regmap, true);
 	regcache_mark_dirty(fpc->regmap);
 
-	/* read from cache */
-	regmap_read(fpc->regmap, FTM_OUTMASK, &val);
-	if ((val & 0xFF) != 0xFF) {
+	for (i = 0; i < fpc->chip.npwm; i++) {
+		struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+		if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+			continue;
+
+		clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+
+		if (!pwm_is_enabled(pwm))
+			continue;
+
 		clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
 		clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
-		clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
 	}
 
 	return 0;
@@ -511,12 +496,19 @@ static int fsl_pwm_suspend(struct device *dev)
 static int fsl_pwm_resume(struct device *dev)
 {
 	struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
-	u32 val;
+	int i;
+
+	for (i = 0; i < fpc->chip.npwm; i++) {
+		struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+		if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+			continue;
 
-	/* read from cache */
-	regmap_read(fpc->regmap, FTM_OUTMASK, &val);
-	if ((val & 0xFF) != 0xFF) {
 		clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+
+		if (!pwm_is_enabled(pwm))
+			continue;
+
 		clk_prepare_enable(fpc->clk[fpc->cnt_select]);
 		clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
 	}
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 9fde60ce8e7b..6e203a65effb 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -24,9 +24,7 @@ struct lpc32xx_pwm_chip {
 	void __iomem *base;
 };
 
-#define PWM_ENABLE	(1 << 31)
-#define PWM_RELOADV(x)	(((x) & 0xFF) << 8)
-#define PWM_DUTY(x)	((x) & 0xFF)
+#define PWM_ENABLE	BIT(31)
 
 #define to_lpc32xx_pwm_chip(_chip) \
 	container_of(_chip, struct lpc32xx_pwm_chip, chip)
@@ -38,40 +36,27 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	unsigned long long c;
 	int period_cycles, duty_cycles;
 	u32 val;
-
-	c = clk_get_rate(lpc32xx->clk) / 256;
-	c = c * period_ns;
-	do_div(c, NSEC_PER_SEC);
-
-	/* Handle high and low extremes */
-	if (c == 0)
-		c = 1;
-	if (c > 255)
-		c = 0; /* 0 set division by 256 */
-	period_cycles = c;
-
-	/* The duty-cycle value is as follows:
-	 *
-	 *  DUTY-CYCLE     HIGH LEVEL
-	 *      1            99.9%
-	 *      25           90.0%
-	 *      128          50.0%
-	 *      220          10.0%
-	 *      255           0.1%
-	 *      0             0.0%
-	 *
-	 * In other words, the register value is duty-cycle % 256 with
-	 * duty-cycle in the range 1-256.
-	 */
-	c = 256 * duty_ns;
-	do_div(c, period_ns);
-	if (c > 255)
-		c = 255;
-	duty_cycles = 256 - c;
+	c = clk_get_rate(lpc32xx->clk);
+
+	/* The highest acceptable divisor is 256, which is represented by 0 */
+	period_cycles = div64_u64(c * period_ns,
+				  (unsigned long long)NSEC_PER_SEC * 256);
+	if (!period_cycles)
+		period_cycles = 1;
+	if (period_cycles > 255)
+		period_cycles = 0;
+
+	/* Compute 256 x #duty/period value and care for corner cases */
+	duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
+				period_ns);
+	if (!duty_cycles)
+		duty_cycles = 1;
+	if (duty_cycles > 255)
+		duty_cycles = 255;
 
 	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
 	val &= ~0xFFFF;
-	val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
+	val |= (period_cycles << 8) | duty_cycles;
 	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
 
 	return 0;
@@ -134,7 +119,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
 
 	lpc32xx->chip.dev = &pdev->dev;
 	lpc32xx->chip.ops = &lpc32xx_pwm_ops;
-	lpc32xx->chip.npwm = 2;
+	lpc32xx->chip.npwm = 1;
 	lpc32xx->chip.base = -1;
 
 	ret = pwmchip_add(&lpc32xx->chip);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c864..3a6d0290c54c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
 		if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
 			sreg->sel = 22;
 
-		if (!sreg->sel) {
+		if (!sreg->bypass && !sreg->sel) {
 			dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
 			return -EINVAL;
 		}
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 {
 	struct sclp_ctl_sccb ctl_sccb;
 	struct sccb_header *sccb;
+	unsigned long copied;
 	int rc;
 
 	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!sccb)
 		return -ENOMEM;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
 		rc = -EFAULT;
 		goto out_free;
 	}
-	if (sccb->length > PAGE_SIZE || sccb->length < 8)
-		return -EINVAL;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
-		rc = -EFAULT;
+	if (sccb->length < 8) {
+		rc = -EINVAL;
 		goto out_free;
 	}
 	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c692dfebd0ba..50597f9522fe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
 
 	device = container_of(kobj, struct device, kobj);
 	chp = to_channelpath(device);
-	if (!chp->cmg_chars)
+	if (chp->cmg == -1)
 		return 0;
 
-	return memory_read_from_buffer(buf, count, &off,
-				chp->cmg_chars, sizeof(struct cmg_chars));
+	return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+				       sizeof(chp->cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
  * chp_update_desc - update channel-path description
  * @chp - channel-path
  *
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
  * Return zero on success, non-zero otherwise.
  */
 int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
 		return rc;
 
 	rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+	if (rc)
+		return rc;
 
-	return rc;
+	return chsc_get_channel_measurement_chars(chp);
 }
 
 /**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
 		ret = -ENODEV;
 		goto out_free;
 	}
-	/* Get channel-measurement characteristics. */
-	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
-		ret = chsc_get_channel_measurement_chars(chp);
-		if (ret)
-			goto out_free;
-	} else {
-		chp->cmg = -1;
-	}
 	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
 
 	/* make it known to the system */
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 4efd5b867cc3..af0232290dc4 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -48,7 +48,7 @@ struct channel_path {
 	/* Channel-measurement related stuff: */
 	int cmg;
 	int shared;
-	void *cmg_chars;
+	struct cmg_chars cmg_chars;
 };
 
 /* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a831d18596a5..c424c0c7367e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 
 #include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
 
 void chsc_chp_offline(struct chp_id chpid)
 {
-	char dbf_txt[15];
+	struct channel_path *chp = chpid_to_chp(chpid);
 	struct chp_link link;
+	char dbf_txt[15];
 
 	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
 	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
+
+	mutex_lock(&chp->lock);
+	chp_update_desc(chp);
+	mutex_unlock(&chp->lock);
+
 	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 
 void chsc_chp_online(struct chp_id chpid)
 {
-	char dbf_txt[15];
+	struct channel_path *chp = chpid_to_chp(chpid);
 	struct chp_link link;
+	char dbf_txt[15];
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
 		link.chpid = chpid;
 		/* Wait until previous actions have settled. */
 		css_wait_for_slow_path();
+
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+
 		for_each_subchannel_staged(__s390_process_res_acc, NULL,
 					   &link);
 		css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 			  struct cmg_chars *chars)
 {
-	struct cmg_chars *cmg_chars;
 	int i, mask;
 
-	cmg_chars = chp->cmg_chars;
 	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
 		mask = 0x80 >> (i + 3);
 		if (cmcv & mask)
-			cmg_chars->values[i] = chars->values[i];
+			chp->cmg_chars.values[i] = chars->values[i];
 		else
-			cmg_chars->values[i] = 0;
+			chp->cmg_chars.values[i] = 0;
 	}
 }
 
 int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
-	struct cmg_chars *cmg_chars;
 	int ccode, ret;
 
 	struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 		u32 data[NR_MEASUREMENT_CHARS];
 	} __attribute__ ((packed)) *scmc_area;
 
-	chp->cmg_chars = NULL;
-	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
-	if (!cmg_chars)
-		return -ENOMEM;
+	chp->shared = -1;
+	chp->cmg = -1;
+
+	if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+		return 0;
 
 	spin_lock_irq(&chsc_page_lock);
 	memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 			      scmc_area->response.code);
 		goto out;
 	}
-	if (scmc_area->not_valid) {
-		chp->cmg = -1;
-		chp->shared = -1;
+	if (scmc_area->not_valid)
 		goto out;
-	}
+
 	chp->cmg = scmc_area->cmg;
 	chp->shared = scmc_area->shared;
 	if (chp->cmg != 2 && chp->cmg != 3) {
 		/* No cmg-dependent data. */
 		goto out;
 	}
-	chp->cmg_chars = cmg_chars;
 	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
 				  (struct cmg_chars *) &scmc_area->data);
 out:
 	spin_unlock_irq(&chsc_page_lock);
-	if (!chp->cmg_chars)
-		kfree(cmg_chars);
-
 	return ret;
 }
 
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 12b2cb7769f9..df036b872b05 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1127,6 +1127,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	qeth_l2_request_initial_mac(card);
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+	netif_carrier_off(card->dev);
 	return register_netdev(card->dev);
 }
 
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 50cec6b13d27..cc4d3c3d8cc5 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3220,6 +3220,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+	netif_carrier_off(card->dev);
 	return register_netdev(card->dev);
 }
 
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 333db5953607..41f9a00e4f74 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2664,7 +2664,7 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
-		return false;
+		goto err_free_dma;
 	}
 	count = 8;
 	while (count){
@@ -2694,19 +2694,23 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
 		acb->firm_model,
 		acb->firm_version);
 
-	acb->signature = readl(&reg->message_rwbuffer[1]);
+	acb->signature = readl(&reg->message_rwbuffer[0]);
 	/*firm_signature,1,00-03*/
-	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
+	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
 	/*firm_request_len,1,04-07*/
-	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
+	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
 	/*firm_numbers_queue,2,08-11*/
-	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
+	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
 	/*firm_sdram_size,3,12-15*/
-	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
+	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
 	/*firm_ide_channels,4,16-19*/
 	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
 	/*firm_ide_channels,4,16-19*/
 	return true;
+err_free_dma:
+	dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+			acb->dma_coherent2, acb->dma_coherent_handle2);
+	return false;
 }
 
 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
@@ -2880,15 +2884,15 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
 		iop_device_map++;
 		count--;
 	}
-	acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+	acb->signature = readl(&reg->msgcode_rwbuffer[0]);
 	/*firm_signature,1,00-03*/
-	acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+	acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
 	/*firm_request_len,1,04-07*/
-	acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+	acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
 	/*firm_numbers_queue,2,08-11*/
-	acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+	acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
 	/*firm_sdram_size,3,12-15*/
-	acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+	acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
 	/*firm_hd_channels,4,16-19*/
 	acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
 	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index fa09d4be2b53..2b456ca69d5c 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1181,8 +1181,9 @@ static const char * const snstext[] = {
 
 /* Get sense key string or NULL if not available */
 const char *
-scsi_sense_key_string(unsigned char key) {
-	if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+	if (key < ARRAY_SIZE(snstext))
 		return snstext[key];
 	return NULL;
 }
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index c11cd193f896..5ada9268a450 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -165,6 +165,8 @@ struct afu {
 	struct sisl_host_map __iomem *host_map;	/* MC host map */
 	struct sisl_ctrl_map __iomem *ctrl_map;	/* MC control map */
 
+	struct kref mapcount;
+
 	ctx_hndl_t ctx_hndl;	/* master's context handle */
 	u64 *hrrq_start;
 	u64 *hrrq_end;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 1e5bf0ca81da..c86847c68448 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
 		atomic64_set(&afu->room, room);
 		if (room)
 			goto write_rrin;
-		udelay(nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 	pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
 		if (rrin != 0x1)
 			break;
 		/* Double delay each time */
-		udelay(2 << nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 }
 
@@ -338,7 +338,7 @@ retry:
 		atomic64_set(&afu->room, room);
 		if (room)
 			goto write_ioarrin;
-		udelay(nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 	dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
 		 * afu->room.
 		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
-			udelay(nretry);
+			udelay(1 << nretry);
 			goto retry;
 		}
 
@@ -368,6 +368,7 @@ out:
 
 no_room:
 	afu->read_room = true;
+	kref_get(&cfg->afu->mapcount);
 	schedule_work(&cfg->work_q);
 	rc = SCSI_MLQUEUE_HOST_BUSY;
 	goto out;
@@ -473,6 +474,16 @@ out:
 	return rc;
 }
 
+static void afu_unmap(struct kref *ref)
+{
+	struct afu *afu = container_of(ref, struct afu, mapcount);
+
+	if (likely(afu->afu_map)) {
+		cxl_psa_unmap((void __iomem *)afu->afu_map);
+		afu->afu_map = NULL;
+	}
+}
+
 /**
  * cxlflash_driver_info() - information handler for this host driver
  * @host:	SCSI host associated with device.
@@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	ulong lock_flags;
 	short lflag = 0;
 	int rc = 0;
+	int kref_got = 0;
 
 	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
@@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 		goto out;
 	}
 
+	kref_get(&cfg->afu->mapcount);
+	kref_got = 1;
+
 	cmd->rcb.ctx_id = afu->ctx_hndl;
 	cmd->rcb.port_sel = port_sel;
 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
@@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	}
 
out:
+	if (kref_got)
+		kref_put(&afu->mapcount, afu_unmap);
 	pr_devel("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
@@ -632,20 +649,36 @@ static void free_mem(struct cxlflash_cfg *cfg)
  * @cfg:	Internal structure associated with the host.
  *
  * Safe to call with AFU in a partially allocated/initialized state.
+ *
+ * Cleans up all state associated with the command queue, and unmaps
+ * the MMIO space.
+ *
+ *  - complete() will take care of commands we initiated (they'll be checked
+ *  in as part of the cleanup that occurs after the completion)
+ *
+ *  - cmd_checkin() will take care of entries that we did not initiate and that
+ *  have not (and will not) complete because they are sitting on a [now stale]
+ *  hardware queue
 */
 static void stop_afu(struct cxlflash_cfg *cfg)
 {
 	int i;
 	struct afu *afu = cfg->afu;
+	struct afu_cmd *cmd;
 
 	if (likely(afu)) {
-		for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
-			complete(&afu->cmd[i].cevent);
+		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+			cmd = &afu->cmd[i];
+			complete(&cmd->cevent);
+			if (!atomic_read(&cmd->free))
+				cmd_checkin(cmd);
+		}
 
 		if (likely(afu->afu_map)) {
 			cxl_psa_unmap((void __iomem *)afu->afu_map);
 			afu->afu_map = NULL;
 		}
+		kref_put(&afu->mapcount, afu_unmap);
 	}
 }
 
@@ -731,8 +764,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
 		scsi_remove_host(cfg->host);
 		/* fall through */
 	case INIT_STATE_AFU:
-		term_afu(cfg);
 		cancel_work_sync(&cfg->work_q);
+		term_afu(cfg);
 	case INIT_STATE_PCI:
 		pci_release_regions(cfg->dev);
 		pci_disable_device(pdev);
@@ -1108,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
-	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
+	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
@@ -1316,6 +1349,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 				__func__, port);
 			cfg->lr_state = LINK_RESET_REQUIRED;
 			cfg->lr_port = port;
+			kref_get(&cfg->afu->mapcount);
 			schedule_work(&cfg->work_q);
 		}
 
@@ -1336,6 +1370,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 
 		if (info->action & SCAN_HOST) {
 			atomic_inc(&cfg->scan_host_needed);
+			kref_get(&cfg->afu->mapcount);
 			schedule_work(&cfg->work_q);
 		}
 	}
@@ -1731,6 +1766,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
 		rc = -ENOMEM;
 		goto err1;
 	}
+	kref_init(&afu->mapcount);
 
 	/* No byte reverse on reading afu_version or string will be backwards */
 	reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1765,8 +1801,7 @@ out:
 	return rc;
 
 err2:
-	cxl_psa_unmap((void __iomem *)afu->afu_map);
-	afu->afu_map = NULL;
+	kref_put(&afu->mapcount, afu_unmap);
 err1:
 	term_mc(cfg, UNDO_START);
 	goto out;
@@ -2114,6 +2149,16 @@ static ssize_t lun_mode_store(struct device *dev,
 	rc = kstrtouint(buf, 10, &lun_mode);
 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
 		afu->internal_lun = lun_mode;
+
+		/*
+		 * When configured for internal LUN, there is only one channel,
+		 * channel number 0, else there will be 2 (default).
+		 */
+		if (afu->internal_lun)
+			shost->max_channel = 0;
+		else
+			shost->max_channel = NUM_FC_PORTS - 1;
+
 		afu_reset(cfg);
 		scsi_scan_host(cfg->host);
 	}
@@ -2274,6 +2319,7 @@ static struct scsi_host_template driver_template = {
  * Device dependent values
  */
 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
 
 /*
  * PCI device binding table
@@ -2281,6 +2327,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
 static struct pci_device_id cxlflash_pci_table[] = {
 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
+	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
 	{}
 };
 
@@ -2339,6 +2387,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
 
 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
 		scsi_scan_host(cfg->host);
+	kref_put(&afu->mapcount, afu_unmap);
 }
 
 /**
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 60324566c14f..3d2d606fafb3 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -24,8 +24,8 @@
 #define CXLFLASH_ADAPTER_NAME	"IBM POWER CXL Flash Adapter"
 #define CXLFLASH_DRIVER_DATE	"(August 13, 2015)"
 
-#define PCI_DEVICE_ID_IBM_CORSA	0x04F0
-#define CXLFLASH_SUBS_DEV_ID	0x04F0
+#define PCI_DEVICE_ID_IBM_CORSA		0x04F0
+#define PCI_DEVICE_ID_IBM_FLASH_GT	0x0600
 
 /* Since there is only one target, make it 0 */
 #define CXLFLASH_TARGET		0
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
|
|
index cac2e6a50efd..babe7ccc1777 100644
|
|
--- a/drivers/scsi/cxlflash/superpipe.c
|
|
+++ b/drivers/scsi/cxlflash/superpipe.c
|
|
@@ -1380,7 +1380,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
|
|
}
|
|
|
|
ctxid = cxl_process_element(ctx);
|
|
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
|
|
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
|
|
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
|
|
rc = -EPERM;
|
|
goto err2;
|
|
@@ -1508,7 +1508,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
|
|
}
|
|
|
|
ctxid = cxl_process_element(ctx);
|
|
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
|
|
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
|
|
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
|
|
rc = -EPERM;
|
|
goto err1;
|
|
@@ -1590,6 +1590,13 @@ err1:
|
|
* place at the same time and the failure was due to CXL services being
|
|
* unable to keep up.
|
|
*
|
|
+ * As this routine is called on ioctl context, it holds the ioctl r/w
|
|
+ * semaphore that is used to drain ioctls in recovery scenarios. The
|
|
+ * implementation to achieve the pacing described above (a local mutex)
|
|
+ * requires that the ioctl r/w semaphore be dropped and reacquired to
|
|
+ * avoid a 3-way deadlock when multiple process recoveries operate in
|
|
+ * parallel.
|
|
+ *
|
|
* Because a user can detect an error condition before the kernel, it is
|
|
* quite possible for this routine to act as the kernel's EEH detection
|
|
* source (MMIO read of mbox_r). Because of this, there is a window of
|
|
@@ -1617,9 +1624,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
|
|
int rc = 0;
|
|
|
|
atomic_inc(&cfg->recovery_threads);
|
|
+ up_read(&cfg->ioctl_rwsem);
|
|
rc = mutex_lock_interruptible(mutex);
|
|
+ down_read(&cfg->ioctl_rwsem);
|
|
if (rc)
|
|
goto out;
|
|
+ rc = check_state(cfg);
|
|
+ if (rc) {
|
|
+ dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
|
|
+ rc = -ENODEV;
|
|
+ goto out;
|
|
+ }
|
|
|
|
dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
|
|
__func__, recover->reason, rctxid);
|
|
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
|
|
index a53f583e2d7b..50f8e9300770 100644
|
|
--- a/drivers/scsi/cxlflash/vlun.c
|
|
+++ b/drivers/scsi/cxlflash/vlun.c
|
|
@@ -1008,6 +1008,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
|
|
virt->last_lba = last_lba;
|
|
virt->rsrc_handle = rsrc_handle;
|
|
|
|
+ if (lli->port_sel == BOTH_PORTS)
|
|
+ virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
|
|
out:
|
|
if (likely(ctxi))
|
|
put_context(ctxi);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
|
|
index b0e6fe46448d..80d3c740a8a8 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_crtn.h
|
|
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
|
|
@@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
|
|
void lpfc_retry_pport_discovery(struct lpfc_hba *);
|
|
void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
|
|
|
|
+void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index b6fa257ea3e0..59ced8864b2f 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -455,9 +455,9 @@ int
|
|
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
|
|
{
|
|
struct lpfc_hba *phba = vport->phba;
|
|
- LPFC_MBOXQ_t *mboxq;
|
|
+ LPFC_MBOXQ_t *mboxq = NULL;
|
|
struct lpfc_nodelist *ndlp;
|
|
- struct lpfc_dmabuf *dmabuf;
|
|
+ struct lpfc_dmabuf *dmabuf = NULL;
|
|
int rc = 0;
|
|
|
|
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
|
|
@@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
|
|
}
|
|
}
|
|
|
|
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
|
|
- if (!dmabuf) {
|
|
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
+ if (!mboxq) {
|
|
rc = -ENOMEM;
|
|
goto fail;
|
|
}
|
|
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
|
|
- if (!dmabuf->virt) {
|
|
- rc = -ENOMEM;
|
|
- goto fail_free_dmabuf;
|
|
- }
|
|
|
|
- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
- if (!mboxq) {
|
|
- rc = -ENOMEM;
|
|
- goto fail_free_coherent;
|
|
+ /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
|
|
+ if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
|
|
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
|
|
+ if (!dmabuf) {
|
|
+ rc = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
|
|
+ if (!dmabuf->virt) {
|
|
+ rc = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+ memcpy(dmabuf->virt, &phba->fc_fabparam,
|
|
+ sizeof(struct serv_parm));
|
|
}
|
|
+
|
|
vport->port_state = LPFC_FABRIC_CFG_LINK;
|
|
- memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
|
|
- lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
|
|
+ if (dmabuf)
|
|
+ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
|
|
+ else
|
|
+ lpfc_reg_vfi(mboxq, vport, 0);
|
|
|
|
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
|
|
mboxq->vport = vport;
|
|
@@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
|
|
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
|
|
if (rc == MBX_NOT_FINISHED) {
|
|
rc = -ENXIO;
|
|
- goto fail_free_mbox;
|
|
+ goto fail;
|
|
}
|
|
return 0;
|
|
|
|
-fail_free_mbox:
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
-fail_free_coherent:
|
|
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
|
|
-fail_free_dmabuf:
|
|
- kfree(dmabuf);
|
|
fail:
|
|
+ if (mboxq)
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
+ if (dmabuf) {
|
|
+ if (dmabuf->virt)
|
|
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
|
|
+ kfree(dmabuf);
|
|
+ }
|
|
+
|
|
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
"0289 Issue Register VFI failed: Err %d\n", rc);
|
|
@@ -711,9 +721,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
* For FC we need to do some special processing because of the SLI
|
|
* Port's default settings of the Common Service Parameters.
|
|
*/
|
|
- if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
|
|
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
|
|
+ (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
|
|
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
|
|
- if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
|
|
+ if (fabric_param_changed)
|
|
lpfc_unregister_fcf_prep(phba);
|
|
|
|
/* This should just update the VFI CSPs*/
|
|
@@ -824,13 +835,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
|
|
spin_lock_irq(shost->host_lock);
|
|
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
|
|
+ vport->fc_flag |= FC_PT2PT;
|
|
spin_unlock_irq(shost->host_lock);
|
|
|
|
- phba->fc_edtov = FF_DEF_EDTOV;
|
|
- phba->fc_ratov = FF_DEF_RATOV;
|
|
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
|
|
+ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
|
|
+ lpfc_unregister_fcf_prep(phba);
|
|
+
|
|
+ spin_lock_irq(shost->host_lock);
|
|
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
|
|
+ spin_unlock_irq(shost->host_lock);
|
|
+ phba->fc_topology_changed = 0;
|
|
+ }
|
|
+
|
|
rc = memcmp(&vport->fc_portname, &sp->portName,
|
|
sizeof(vport->fc_portname));
|
|
- memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
|
|
|
if (rc >= 0) {
|
|
/* This side will initiate the PLOGI */
|
|
@@ -839,38 +858,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
spin_unlock_irq(shost->host_lock);
|
|
|
|
/*
|
|
- * N_Port ID cannot be 0, set our to LocalID the other
|
|
- * side will be RemoteID.
|
|
+ * N_Port ID cannot be 0, set our Id to LocalID
|
|
+ * the other side will be RemoteID.
|
|
*/
|
|
|
|
/* not equal */
|
|
if (rc)
|
|
vport->fc_myDID = PT2PT_LocalID;
|
|
|
|
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
- if (!mbox)
|
|
- goto fail;
|
|
-
|
|
- lpfc_config_link(phba, mbox);
|
|
-
|
|
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
- mbox->vport = vport;
|
|
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
- if (rc == MBX_NOT_FINISHED) {
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- goto fail;
|
|
- }
|
|
-
|
|
- /*
|
|
- * For SLI4, the VFI/VPI are registered AFTER the
|
|
- * Nport with the higher WWPN sends the PLOGI with
|
|
- * an assigned NPortId.
|
|
- */
|
|
-
|
|
- /* not equal */
|
|
- if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
|
|
- lpfc_issue_reg_vfi(vport);
|
|
-
|
|
/* Decrement ndlp reference count indicating that ndlp can be
|
|
* safely released when other references to it are done.
|
|
*/
|
|
@@ -912,29 +907,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
/* If we are pt2pt with another NPort, force NPIV off! */
|
|
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
|
|
|
|
- spin_lock_irq(shost->host_lock);
|
|
- vport->fc_flag |= FC_PT2PT;
|
|
- spin_unlock_irq(shost->host_lock);
|
|
- /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
|
|
- if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
|
|
- lpfc_unregister_fcf_prep(phba);
|
|
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
+ if (!mbox)
|
|
+ goto fail;
|
|
|
|
- /* The FC_VFI_REGISTERED flag will get clear in the cmpl
|
|
- * handler for unreg_vfi, but if we don't force the
|
|
- * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
|
|
- * built with the update bit set instead of just the vp bit to
|
|
- * change the Nport ID. We need to have the vp set and the
|
|
- * Upd cleared on topology changes.
|
|
- */
|
|
- spin_lock_irq(shost->host_lock);
|
|
- vport->fc_flag &= ~FC_VFI_REGISTERED;
|
|
- spin_unlock_irq(shost->host_lock);
|
|
- phba->fc_topology_changed = 0;
|
|
- lpfc_issue_reg_vfi(vport);
|
|
+ lpfc_config_link(phba, mbox);
|
|
+
|
|
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
|
|
+ mbox->vport = vport;
|
|
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
+ if (rc == MBX_NOT_FINISHED) {
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ goto fail;
|
|
}
|
|
|
|
- /* Start discovery - this should just do CLEAR_LA */
|
|
- lpfc_disc_start(vport);
|
|
return 0;
|
|
fail:
|
|
return -ENXIO;
|
|
@@ -1157,6 +1143,7 @@ flogifail:
|
|
spin_lock_irq(&phba->hbalock);
|
|
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
|
|
spin_unlock_irq(&phba->hbalock);
|
|
+
|
|
lpfc_nlp_put(ndlp);
|
|
|
|
if (!lpfc_error_lost_link(irsp)) {
|
|
@@ -3792,14 +3779,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
lpfc_nlp_set_state(vport, ndlp,
|
|
NLP_STE_REG_LOGIN_ISSUE);
|
|
}
|
|
+
|
|
+ ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
|
|
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
|
|
!= MBX_NOT_FINISHED)
|
|
goto out;
|
|
- else
|
|
- /* Decrement the ndlp reference count we
|
|
- * set for this failed mailbox command.
|
|
- */
|
|
- lpfc_nlp_put(ndlp);
|
|
+
|
|
+ /* Decrement the ndlp reference count we
|
|
+ * set for this failed mailbox command.
|
|
+ */
|
|
+ lpfc_nlp_put(ndlp);
|
|
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
|
|
|
|
/* ELS rsp: Cannot issue reg_login for <NPortid> */
|
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
@@ -3856,6 +3846,7 @@ out:
|
|
* the routine lpfc_els_free_iocb.
|
|
*/
|
|
cmdiocb->context1 = NULL;
|
|
+
|
|
}
|
|
|
|
lpfc_els_free_iocb(phba, cmdiocb);
|
|
@@ -3898,6 +3889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
|
IOCB_t *oldcmd;
|
|
struct lpfc_iocbq *elsiocb;
|
|
uint8_t *pcmd;
|
|
+ struct serv_parm *sp;
|
|
uint16_t cmdsize;
|
|
int rc;
|
|
ELS_PKT *els_pkt_ptr;
|
|
@@ -3927,6 +3919,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
|
"Issue ACC: did:x%x flg:x%x",
|
|
ndlp->nlp_DID, ndlp->nlp_flag, 0);
|
|
break;
|
|
+ case ELS_CMD_FLOGI:
|
|
case ELS_CMD_PLOGI:
|
|
cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
|
|
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
|
|
@@ -3944,10 +3937,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
|
|
|
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
|
pcmd += sizeof(uint32_t);
|
|
- memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
|
|
+ sp = (struct serv_parm *)pcmd;
|
|
+
|
|
+ if (flag == ELS_CMD_FLOGI) {
|
|
+ /* Copy the received service parameters back */
|
|
+ memcpy(sp, &phba->fc_fabparam,
|
|
+ sizeof(struct serv_parm));
|
|
+
|
|
+ /* Clear the F_Port bit */
|
|
+ sp->cmn.fPort = 0;
|
|
+
|
|
+ /* Mark all class service parameters as invalid */
|
|
+ sp->cls1.classValid = 0;
|
|
+ sp->cls2.classValid = 0;
|
|
+ sp->cls3.classValid = 0;
|
|
+ sp->cls4.classValid = 0;
|
|
+
|
|
+ /* Copy our worldwide names */
|
|
+ memcpy(&sp->portName, &vport->fc_sparam.portName,
|
|
+ sizeof(struct lpfc_name));
|
|
+ memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
|
|
+ sizeof(struct lpfc_name));
|
|
+ } else {
|
|
+ memcpy(pcmd, &vport->fc_sparam,
|
|
+ sizeof(struct serv_parm));
|
|
+ }
|
|
|
|
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
|
|
- "Issue ACC PLOGI: did:x%x flg:x%x",
|
|
+ "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
|
|
ndlp->nlp_DID, ndlp->nlp_flag, 0);
|
|
break;
|
|
case ELS_CMD_PRLO:
|
|
@@ -4681,28 +4698,25 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
|
|
|
|
desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
|
|
|
|
- switch (phba->sli4_hba.link_state.speed) {
|
|
- case LPFC_FC_LA_SPEED_1G:
|
|
+ switch (phba->fc_linkspeed) {
|
|
+ case LPFC_LINK_SPEED_1GHZ:
|
|
rdp_speed = RDP_PS_1GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_2G:
|
|
+ case LPFC_LINK_SPEED_2GHZ:
|
|
rdp_speed = RDP_PS_2GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_4G:
|
|
+ case LPFC_LINK_SPEED_4GHZ:
|
|
rdp_speed = RDP_PS_4GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_8G:
|
|
+ case LPFC_LINK_SPEED_8GHZ:
|
|
rdp_speed = RDP_PS_8GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_10G:
|
|
+ case LPFC_LINK_SPEED_10GHZ:
|
|
rdp_speed = RDP_PS_10GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_16G:
|
|
+ case LPFC_LINK_SPEED_16GHZ:
|
|
rdp_speed = RDP_PS_16GB;
|
|
break;
|
|
- case LPFC_FC_LA_SPEED_32G:
|
|
- rdp_speed = RDP_PS_32GB;
|
|
- break;
|
|
default:
|
|
rdp_speed = RDP_PS_UNKNOWN;
|
|
break;
|
|
@@ -5739,7 +5753,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
|
IOCB_t *icmd = &cmdiocb->iocb;
|
|
struct serv_parm *sp;
|
|
LPFC_MBOXQ_t *mbox;
|
|
- struct ls_rjt stat;
|
|
uint32_t cmd, did;
|
|
int rc;
|
|
uint32_t fc_flag = 0;
|
|
@@ -5765,135 +5778,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
|
return 1;
|
|
}
|
|
|
|
- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
|
|
- /* For a FLOGI we accept, then if our portname is greater
|
|
- * then the remote portname we initiate Nport login.
|
|
- */
|
|
+ (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
|
|
|
|
- rc = memcmp(&vport->fc_portname, &sp->portName,
|
|
- sizeof(struct lpfc_name));
|
|
|
|
- if (!rc) {
|
|
- if (phba->sli_rev < LPFC_SLI_REV4) {
|
|
- mbox = mempool_alloc(phba->mbox_mem_pool,
|
|
- GFP_KERNEL);
|
|
- if (!mbox)
|
|
- return 1;
|
|
- lpfc_linkdown(phba);
|
|
- lpfc_init_link(phba, mbox,
|
|
- phba->cfg_topology,
|
|
- phba->cfg_link_speed);
|
|
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
|
|
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
- mbox->vport = vport;
|
|
- rc = lpfc_sli_issue_mbox(phba, mbox,
|
|
- MBX_NOWAIT);
|
|
- lpfc_set_loopback_flag(phba);
|
|
- if (rc == MBX_NOT_FINISHED)
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- return 1;
|
|
- } else {
|
|
- /* abort the flogi coming back to ourselves
|
|
- * due to external loopback on the port.
|
|
- */
|
|
- lpfc_els_abort_flogi(phba);
|
|
- return 0;
|
|
- }
|
|
- } else if (rc > 0) { /* greater than */
|
|
- spin_lock_irq(shost->host_lock);
|
|
- vport->fc_flag |= FC_PT2PT_PLOGI;
|
|
- spin_unlock_irq(shost->host_lock);
|
|
+ /*
|
|
+ * If our portname is greater than the remote portname,
|
|
+ * then we initiate Nport login.
|
|
+ */
|
|
|
|
- /* If we have the high WWPN we can assign our own
|
|
- * myDID; otherwise, we have to WAIT for a PLOGI
|
|
- * from the remote NPort to find out what it
|
|
- * will be.
|
|
- */
|
|
- vport->fc_myDID = PT2PT_LocalID;
|
|
- } else
|
|
- vport->fc_myDID = PT2PT_RemoteID;
|
|
+ rc = memcmp(&vport->fc_portname, &sp->portName,
|
|
+ sizeof(struct lpfc_name));
|
|
|
|
- /*
|
|
- * The vport state should go to LPFC_FLOGI only
|
|
- * AFTER we issue a FLOGI, not receive one.
|
|
+ if (!rc) {
|
|
+ if (phba->sli_rev < LPFC_SLI_REV4) {
|
|
+ mbox = mempool_alloc(phba->mbox_mem_pool,
|
|
+ GFP_KERNEL);
|
|
+ if (!mbox)
|
|
+ return 1;
|
|
+ lpfc_linkdown(phba);
|
|
+ lpfc_init_link(phba, mbox,
|
|
+ phba->cfg_topology,
|
|
+ phba->cfg_link_speed);
|
|
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
|
|
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
+ mbox->vport = vport;
|
|
+ rc = lpfc_sli_issue_mbox(phba, mbox,
|
|
+ MBX_NOWAIT);
|
|
+ lpfc_set_loopback_flag(phba);
|
|
+ if (rc == MBX_NOT_FINISHED)
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ /* abort the flogi coming back to ourselves
|
|
+ * due to external loopback on the port.
|
|
*/
|
|
+ lpfc_els_abort_flogi(phba);
|
|
+ return 0;
|
|
+
|
|
+ } else if (rc > 0) { /* greater than */
|
|
spin_lock_irq(shost->host_lock);
|
|
- fc_flag = vport->fc_flag;
|
|
- port_state = vport->port_state;
|
|
- vport->fc_flag |= FC_PT2PT;
|
|
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
|
|
+ vport->fc_flag |= FC_PT2PT_PLOGI;
|
|
spin_unlock_irq(shost->host_lock);
|
|
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
|
- "3311 Rcv Flogi PS x%x new PS x%x "
|
|
- "fc_flag x%x new fc_flag x%x\n",
|
|
- port_state, vport->port_state,
|
|
- fc_flag, vport->fc_flag);
|
|
|
|
- /*
|
|
- * We temporarily set fc_myDID to make it look like we are
|
|
- * a Fabric. This is done just so we end up with the right
|
|
- * did / sid on the FLOGI ACC rsp.
|
|
+ /* If we have the high WWPN we can assign our own
|
|
+ * myDID; otherwise, we have to WAIT for a PLOGI
|
|
+ * from the remote NPort to find out what it
|
|
+ * will be.
|
|
*/
|
|
- did = vport->fc_myDID;
|
|
- vport->fc_myDID = Fabric_DID;
|
|
-
|
|
+ vport->fc_myDID = PT2PT_LocalID;
|
|
} else {
|
|
- /* Reject this request because invalid parameters */
|
|
- stat.un.b.lsRjtRsvd0 = 0;
|
|
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
|
|
- stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
|
|
- stat.un.b.vendorUnique = 0;
|
|
-
|
|
- /*
|
|
- * We temporarily set fc_myDID to make it look like we are
|
|
- * a Fabric. This is done just so we end up with the right
|
|
- * did / sid on the FLOGI LS_RJT rsp.
|
|
- */
|
|
- did = vport->fc_myDID;
|
|
- vport->fc_myDID = Fabric_DID;
|
|
-
|
|
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
|
|
- NULL);
|
|
+ vport->fc_myDID = PT2PT_RemoteID;
|
|
+ }
|
|
|
|
- /* Now lets put fc_myDID back to what its supposed to be */
|
|
- vport->fc_myDID = did;
|
|
+ /*
|
|
+ * The vport state should go to LPFC_FLOGI only
|
|
+ * AFTER we issue a FLOGI, not receive one.
|
|
+ */
|
|
+ spin_lock_irq(shost->host_lock);
|
|
+ fc_flag = vport->fc_flag;
|
|
+ port_state = vport->port_state;
|
|
+ vport->fc_flag |= FC_PT2PT;
|
|
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
|
|
+ spin_unlock_irq(shost->host_lock);
|
|
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
|
+ "3311 Rcv Flogi PS x%x new PS x%x "
|
|
+ "fc_flag x%x new fc_flag x%x\n",
|
|
+ port_state, vport->port_state,
|
|
+ fc_flag, vport->fc_flag);
|
|
|
|
- return 1;
|
|
- }
|
|
+ /*
|
|
+ * We temporarily set fc_myDID to make it look like we are
|
|
+ * a Fabric. This is done just so we end up with the right
|
|
+ * did / sid on the FLOGI ACC rsp.
|
|
+ */
|
|
+ did = vport->fc_myDID;
|
|
+ vport->fc_myDID = Fabric_DID;
|
|
|
|
- /* send our FLOGI first */
|
|
- if (vport->port_state < LPFC_FLOGI) {
|
|
- vport->fc_myDID = 0;
|
|
- lpfc_initial_flogi(vport);
|
|
- vport->fc_myDID = Fabric_DID;
|
|
- }
|
|
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
|
|
|
/* Send back ACC */
|
|
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
|
|
+ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
|
|
|
|
/* Now lets put fc_myDID back to what its supposed to be */
|
|
vport->fc_myDID = did;
|
|
|
|
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
|
|
-
|
|
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
- if (!mbox)
|
|
- goto fail;
|
|
-
|
|
- lpfc_config_link(phba, mbox);
|
|
-
|
|
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
- mbox->vport = vport;
|
|
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
- if (rc == MBX_NOT_FINISHED) {
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- goto fail;
|
|
- }
|
|
- }
|
|
-
|
|
return 0;
|
|
-fail:
|
|
- return 1;
|
|
}
|
|
|
|
/**
|
|
@@ -7345,7 +7315,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|
|
|
/* reject till our FLOGI completes */
|
|
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
|
|
- (cmd != ELS_CMD_FLOGI)) {
|
|
+ (cmd != ELS_CMD_FLOGI)) {
|
|
rjt_err = LSRJT_UNABLE_TPC;
|
|
rjt_exp = LSEXP_NOTHING_MORE;
|
|
goto lsrjt;
|
|
@@ -7381,6 +7351,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|
rjt_exp = LSEXP_NOTHING_MORE;
|
|
break;
|
|
}
|
|
+
|
|
if (vport->port_state < LPFC_DISC_AUTH) {
|
|
if (!(phba->pport->fc_flag & FC_PT2PT) ||
|
|
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
|
|
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
index bfc2442dd74a..d3668aa555d5 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
@@ -1083,7 +1083,7 @@ out:
|
|
}
|
|
|
|
|
|
-static void
|
|
+void
|
|
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
{
|
|
struct lpfc_vport *vport = pmb->vport;
|
|
@@ -1113,8 +1113,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
/* Start discovery by sending a FLOGI. port_state is identically
|
|
* LPFC_FLOGI while waiting for FLOGI cmpl
|
|
*/
|
|
- if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
|
|
+ if (vport->port_state != LPFC_FLOGI)
|
|
lpfc_initial_flogi(vport);
|
|
+ else if (vport->fc_flag & FC_PT2PT)
|
|
+ lpfc_disc_start(vport);
|
|
return;
|
|
|
|
out:
|
|
@@ -2963,8 +2965,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|
|
|
out_free_mem:
|
|
mempool_free(mboxq, phba->mbox_mem_pool);
|
|
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
|
|
- kfree(dmabuf);
|
|
+ if (dmabuf) {
|
|
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
|
|
+ kfree(dmabuf);
|
|
+ }
|
|
return;
|
|
}
|
|
|
|
@@ -3448,10 +3452,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
|
|
spin_unlock_irq(shost->host_lock);
|
|
- } else
|
|
- /* Good status, call state machine */
|
|
- lpfc_disc_state_machine(vport, ndlp, pmb,
|
|
- NLP_EVT_CMPL_REG_LOGIN);
|
|
+ }
|
|
+
|
|
+ /* Call state machine */
|
|
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
|
|
|
|
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
|
kfree(mp);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
|
|
index b0d92b84bcdc..c14ab6c3ae40 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_init.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_init.c
|
|
@@ -8834,9 +8834,12 @@ found:
|
|
* already mapped to this phys_id.
|
|
*/
|
|
if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
|
|
- chann[saved_chann] =
|
|
- cpup->channel_id;
|
|
- saved_chann++;
|
|
+ if (saved_chann <=
|
|
+ LPFC_FCP_IO_CHAN_MAX) {
|
|
+ chann[saved_chann] =
|
|
+ cpup->channel_id;
|
|
+ saved_chann++;
|
|
+ }
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
|
|
index f87f90e9b7df..1e34b5408a29 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_mbox.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
|
|
@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
|
|
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
|
|
reg_vfi->e_d_tov = phba->fc_edtov;
|
|
reg_vfi->r_a_tov = phba->fc_ratov;
|
|
- reg_vfi->bde.addrHigh = putPaddrHigh(phys);
|
|
- reg_vfi->bde.addrLow = putPaddrLow(phys);
|
|
- reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
|
|
- reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
+ if (phys) {
|
|
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
|
|
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
|
|
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
|
|
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
+ }
|
|
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
|
|
|
|
/* Only FC supports upd bit */
|
|
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
index ed9a2c80c4aa..193733e8c823 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
@@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
uint32_t *lp;
|
|
IOCB_t *icmd;
|
|
struct serv_parm *sp;
|
|
+ uint32_t ed_tov;
|
|
LPFC_MBOXQ_t *mbox;
|
|
struct ls_rjt stat;
|
|
int rc;
|
|
|
|
memset(&stat, 0, sizeof (struct ls_rjt));
|
|
- if (vport->port_state <= LPFC_FDISC) {
|
|
- /* Before responding to PLOGI, check for pt2pt mode.
|
|
- * If we are pt2pt, with an outstanding FLOGI, abort
|
|
- * the FLOGI and resend it first.
|
|
- */
|
|
- if (vport->fc_flag & FC_PT2PT) {
|
|
- lpfc_els_abort_flogi(phba);
|
|
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
|
|
- /* If the other side is supposed to initiate
|
|
- * the PLOGI anyway, just ACC it now and
|
|
- * move on with discovery.
|
|
- */
|
|
- phba->fc_edtov = FF_DEF_EDTOV;
|
|
- phba->fc_ratov = FF_DEF_RATOV;
|
|
- /* Start discovery - this should just do
|
|
- CLEAR_LA */
|
|
- lpfc_disc_start(vport);
|
|
- } else
|
|
- lpfc_initial_flogi(vport);
|
|
- } else {
|
|
- stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
|
|
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
|
|
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
|
|
- ndlp, NULL);
|
|
- return 0;
|
|
- }
|
|
- }
|
|
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
|
|
lp = (uint32_t *) pcmd->virt;
|
|
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
|
|
@@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
/* Check for Nport to NPort pt2pt protocol */
|
|
if ((vport->fc_flag & FC_PT2PT) &&
|
|
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
|
|
-
|
|
/* rcv'ed PLOGI decides what our NPortId will be */
|
|
vport->fc_myDID = icmd->un.rcvels.parmRo;
|
|
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
- if (mbox == NULL)
|
|
- goto out;
|
|
- lpfc_config_link(phba, mbox);
|
|
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
- mbox->vport = vport;
|
|
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
- if (rc == MBX_NOT_FINISHED) {
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- goto out;
|
|
+
|
|
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
|
|
+ if (sp->cmn.edtovResolution) {
|
|
+ /* E_D_TOV ticks are in nanoseconds */
|
|
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
|
|
}
|
|
+
|
|
/*
|
|
- * For SLI4, the VFI/VPI are registered AFTER the
|
|
- * Nport with the higher WWPN sends us a PLOGI with
|
|
- * our assigned NPortId.
|
|
+ * For pt-to-pt, use the larger EDTOV
|
|
+ * RATOV = 2 * EDTOV
|
|
*/
|
|
+ if (ed_tov > phba->fc_edtov)
|
|
+ phba->fc_edtov = ed_tov;
|
|
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
|
|
+
|
|
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
|
+
|
|
+ /* Issue config_link / reg_vfi to account for updated TOV's */
|
|
+
|
|
if (phba->sli_rev == LPFC_SLI_REV4)
|
|
lpfc_issue_reg_vfi(vport);
|
|
+ else {
|
|
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
+ if (mbox == NULL)
|
|
+ goto out;
|
|
+ lpfc_config_link(phba, mbox);
|
|
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
+ mbox->vport = vport;
|
|
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
+ if (rc == MBX_NOT_FINISHED) {
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
|
|
lpfc_can_disctmo(vport);
|
|
}
|
|
+
|
|
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
if (!mbox)
|
|
goto out;
|
|
@@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
|
|
uint32_t *lp;
|
|
IOCB_t *irsp;
|
|
struct serv_parm *sp;
|
|
+ uint32_t ed_tov;
|
|
LPFC_MBOXQ_t *mbox;
|
|
+ int rc;
|
|
|
|
cmdiocb = (struct lpfc_iocbq *) arg;
|
|
rspiocb = cmdiocb->context_un.rsp_iocb;
|
|
@@ -1094,18 +1086,63 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
|
|
ndlp->nlp_maxframe =
|
|
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
|
|
|
|
+ if ((vport->fc_flag & FC_PT2PT) &&
|
|
+ (vport->fc_flag & FC_PT2PT_PLOGI)) {
|
|
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
|
|
+ if (sp->cmn.edtovResolution) {
|
|
+ /* E_D_TOV ticks are in nanoseconds */
|
|
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Use the larger EDTOV
|
|
+ * RATOV = 2 * EDTOV for pt-to-pt
|
|
+ */
|
|
+ if (ed_tov > phba->fc_edtov)
|
|
+ phba->fc_edtov = ed_tov;
|
|
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
|
|
+
|
|
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
|
+
|
|
+ /* Issue config_link / reg_vfi to account for updated TOV's */
|
|
+ if (phba->sli_rev == LPFC_SLI_REV4) {
|
|
+ lpfc_issue_reg_vfi(vport);
|
|
+ } else {
|
|
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
+ if (!mbox) {
|
|
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
+ "0133 PLOGI: no memory "
|
|
+ "for config_link "
|
|
+ "Data: x%x x%x x%x x%x\n",
|
|
+ ndlp->nlp_DID, ndlp->nlp_state,
|
|
+ ndlp->nlp_flag, ndlp->nlp_rpi);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ lpfc_config_link(phba, mbox);
|
|
+
|
|
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
+ mbox->vport = vport;
|
|
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
|
+ if (rc == MBX_NOT_FINISHED) {
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ lpfc_unreg_rpi(vport, ndlp);
|
|
+
|
|
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
if (!mbox) {
|
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
- "0133 PLOGI: no memory for reg_login "
|
|
- "Data: x%x x%x x%x x%x\n",
|
|
- ndlp->nlp_DID, ndlp->nlp_state,
|
|
- ndlp->nlp_flag, ndlp->nlp_rpi);
|
|
+ "0018 PLOGI: no memory for reg_login "
|
|
+ "Data: x%x x%x x%x x%x\n",
|
|
+ ndlp->nlp_DID, ndlp->nlp_state,
|
|
+ ndlp->nlp_flag, ndlp->nlp_rpi);
|
|
goto out;
|
|
}
|
|
|
|
- lpfc_unreg_rpi(vport, ndlp);
|
|
-
|
|
if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
|
|
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
|
|
switch (ndlp->nlp_DID) {
|
|
@@ -2299,6 +2336,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
|
|
if (vport->phba->sli_rev < LPFC_SLI_REV4)
|
|
ndlp->nlp_rpi = mb->un.varWords[0];
|
|
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
|
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
|
|
+ lpfc_unreg_rpi(vport, ndlp);
|
|
+ }
|
|
} else {
|
|
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
|
|
lpfc_drop_node(vport, ndlp);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
index 9e165bc05ee1..bae36cc3740b 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_scsi.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
@@ -3908,9 +3908,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
|
uint32_t logit = LOG_FCP;
|
|
|
|
/* Sanity check on return of outstanding command */
|
|
- if (!(lpfc_cmd->pCmd))
|
|
- return;
|
|
cmd = lpfc_cmd->pCmd;
|
|
+ if (!cmd)
|
|
+ return;
|
|
shost = cmd->device->host;
|
|
|
|
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index f9585cdd8933..92dfd6a5178c 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -14842,10 +14842,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
|
|
struct lpfc_dmabuf *h_buf;
|
|
struct hbq_dmabuf *seq_dmabuf = NULL;
|
|
struct hbq_dmabuf *temp_dmabuf = NULL;
|
|
+ uint8_t found = 0;
|
|
|
|
INIT_LIST_HEAD(&dmabuf->dbuf.list);
|
|
dmabuf->time_stamp = jiffies;
|
|
new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
|
|
+
|
|
/* Use the hdr_buf to find the sequence that this frame belongs to */
|
|
list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
|
|
temp_hdr = (struct fc_frame_header *)h_buf->virt;
|
|
@@ -14885,7 +14887,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
|
|
return seq_dmabuf;
|
|
}
|
|
/* find the correct place in the sequence to insert this frame */
|
|
- list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
|
|
+ d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
|
|
+ while (!found) {
|
|
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
|
temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
|
|
/*
|
|
@@ -14895,9 +14898,17 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
|
|
if (be16_to_cpu(new_hdr->fh_seq_cnt) >
|
|
be16_to_cpu(temp_hdr->fh_seq_cnt)) {
|
|
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
|
|
- return seq_dmabuf;
|
|
+ found = 1;
|
|
+ break;
|
|
}
|
|
+
|
|
+ if (&d_buf->list == &seq_dmabuf->dbuf.list)
|
|
+ break;
|
|
+ d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
|
|
}
|
|
+
|
|
+ if (found)
|
|
+ return seq_dmabuf;
|
|
return NULL;
|
|
}
|
|
|
|
@@ -16173,7 +16184,7 @@ fail_fcf_read:
|
|
}
|
|
|
|
/**
|
|
- * lpfc_check_next_fcf_pri
|
|
+ * lpfc_check_next_fcf_pri_level
|
|
* phba pointer to the lpfc_hba struct for this port.
|
|
* This routine is called from the lpfc_sli4_fcf_rr_next_index_get
|
|
* routine when the rr_bmask is empty. The FCF indecies are put into the
|
|
@@ -16329,8 +16340,12 @@ next_priority:
|
|
|
|
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
|
|
phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
|
|
- LPFC_FCF_FLOGI_FAILED)
|
|
+ LPFC_FCF_FLOGI_FAILED) {
|
|
+ if (list_is_singular(&phba->fcf.fcf_pri_list))
|
|
+ return LPFC_FCOE_FCF_NEXT_NONE;
|
|
+
|
|
goto next_priority;
|
|
+ }
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
|
"2845 Get next roundrobin failover FCF (x%x)\n",
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
|
|
index c0f7c8ce54aa..ef4ff03242ea 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas.h
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas.h
|
|
@@ -1083,6 +1083,8 @@ struct megasas_ctrl_info {
|
|
|
|
#define VD_EXT_DEBUG 0
|
|
|
|
+#define SCAN_PD_CHANNEL 0x1
|
|
+#define SCAN_VD_CHANNEL 0x2
|
|
|
|
enum MR_SCSI_CMD_TYPE {
|
|
READ_WRITE_LDIO = 0,
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
index e994ff944091..3f8d357b1bac 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
@@ -735,6 +735,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
|
|
&(regs)->inbound_high_queue_port);
|
|
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
|
|
&(regs)->inbound_low_queue_port);
|
|
+ mmiowb();
|
|
spin_unlock_irqrestore(&instance->hba_lock, flags);
|
|
}
|
|
|
|
@@ -5476,7 +5477,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
|
|
spin_lock_init(&instance->hba_lock);
|
|
spin_lock_init(&instance->completion_lock);
|
|
|
|
- mutex_init(&instance->aen_mutex);
|
|
mutex_init(&instance->reset_mutex);
|
|
|
|
/*
|
|
@@ -6443,10 +6443,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
|
|
}
|
|
spin_unlock_irqrestore(&instance->hba_lock, flags);
|
|
|
|
- mutex_lock(&instance->aen_mutex);
|
|
+ mutex_lock(&instance->reset_mutex);
|
|
error = megasas_register_aen(instance, aen.seq_num,
|
|
aen.class_locale_word);
|
|
- mutex_unlock(&instance->aen_mutex);
|
|
+ mutex_unlock(&instance->reset_mutex);
|
|
return error;
|
|
}
|
|
|
|
@@ -6477,9 +6477,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
|
|
int i;
|
|
int error = 0;
|
|
compat_uptr_t ptr;
|
|
- unsigned long local_raw_ptr;
|
|
u32 local_sense_off;
|
|
u32 local_sense_len;
|
|
+ u32 user_sense_off;
|
|
|
|
if (clear_user(ioc, sizeof(*ioc)))
|
|
return -EFAULT;
|
|
@@ -6497,17 +6497,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
|
|
* sense_len is not null, so prepare the 64bit value under
|
|
* the same condition.
|
|
*/
|
|
- if (get_user(local_raw_ptr, ioc->frame.raw) ||
|
|
- get_user(local_sense_off, &ioc->sense_off) ||
|
|
- get_user(local_sense_len, &ioc->sense_len))
|
|
+ if (get_user(local_sense_off, &ioc->sense_off) ||
|
|
+ get_user(local_sense_len, &ioc->sense_len) ||
|
|
+ get_user(user_sense_off, &cioc->sense_off))
|
|
return -EFAULT;
|
|
|
|
-
|
|
if (local_sense_len) {
|
|
void __user **sense_ioc_ptr =
|
|
- (void __user **)((u8*)local_raw_ptr + local_sense_off);
|
|
+ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
|
|
compat_uptr_t *sense_cioc_ptr =
|
|
- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
|
|
+ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
|
|
if (get_user(ptr, sense_cioc_ptr) ||
|
|
put_user(compat_ptr(ptr), sense_ioc_ptr))
|
|
return -EFAULT;
|
|
@@ -6648,6 +6647,7 @@ megasas_aen_polling(struct work_struct *work)
|
|
int i, j, doscan = 0;
|
|
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
|
|
int error;
|
|
+ u8 dcmd_ret = 0;
|
|
|
|
if (!instance) {
|
|
printk(KERN_ERR "invalid instance!\n");
|
|
@@ -6660,16 +6660,7 @@ megasas_aen_polling(struct work_struct *work)
|
|
wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
|
|
|
|
/* Don't run the event workqueue thread if OCR is running */
|
|
- for (i = 0; i < wait_time; i++) {
|
|
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
|
|
- break;
|
|
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
|
|
- dev_notice(&instance->pdev->dev, "%s waiting for "
|
|
- "controller reset to finish for scsi%d\n",
|
|
- __func__, instance->host->host_no);
|
|
- }
|
|
- msleep(1000);
|
|
- }
|
|
+ mutex_lock(&instance->reset_mutex);
|
|
|
|
instance->ev = NULL;
|
|
host = instance->host;
|
|
@@ -6677,212 +6668,127 @@ megasas_aen_polling(struct work_struct *work)
|
|
megasas_decode_evt(instance);
|
|
|
|
switch (le32_to_cpu(instance->evt_detail->code)) {
|
|
- case MR_EVT_PD_INSERTED:
|
|
- if (megasas_get_pd_list(instance) == 0) {
|
|
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
|
|
- for (j = 0;
|
|
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
|
|
- j++) {
|
|
-
|
|
- pd_index =
|
|
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
-
|
|
- sdev1 = scsi_device_lookup(host, i, j, 0);
|
|
-
|
|
- if (instance->pd_list[pd_index].driveState
|
|
- == MR_PD_STATE_SYSTEM) {
|
|
- if (!sdev1)
|
|
- scsi_add_device(host, i, j, 0);
|
|
-
|
|
- if (sdev1)
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
- doscan = 0;
|
|
- break;
|
|
|
|
+ case MR_EVT_PD_INSERTED:
|
|
case MR_EVT_PD_REMOVED:
|
|
- if (megasas_get_pd_list(instance) == 0) {
|
|
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
|
|
- for (j = 0;
|
|
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
|
|
- j++) {
|
|
-
|
|
- pd_index =
|
|
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
-
|
|
- sdev1 = scsi_device_lookup(host, i, j, 0);
|
|
-
|
|
- if (instance->pd_list[pd_index].driveState
|
|
- == MR_PD_STATE_SYSTEM) {
|
|
- if (sdev1)
|
|
- scsi_device_put(sdev1);
|
|
- } else {
|
|
- if (sdev1) {
|
|
- scsi_remove_device(sdev1);
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
- doscan = 0;
|
|
+ dcmd_ret = megasas_get_pd_list(instance);
|
|
+ if (dcmd_ret == 0)
|
|
+ doscan = SCAN_PD_CHANNEL;
|
|
break;
|
|
|
|
case MR_EVT_LD_OFFLINE:
|
|
case MR_EVT_CFG_CLEARED:
|
|
case MR_EVT_LD_DELETED:
|
|
- if (!instance->requestorId ||
|
|
- megasas_get_ld_vf_affiliation(instance, 0)) {
|
|
- if (megasas_ld_list_query(instance,
|
|
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
|
|
- megasas_get_ld_list(instance);
|
|
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
|
|
- for (j = 0;
|
|
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
|
|
- j++) {
|
|
-
|
|
- ld_index =
|
|
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
-
|
|
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
-
|
|
- if (instance->ld_ids[ld_index]
|
|
- != 0xff) {
|
|
- if (sdev1)
|
|
- scsi_device_put(sdev1);
|
|
- } else {
|
|
- if (sdev1) {
|
|
- scsi_remove_device(sdev1);
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
- doscan = 0;
|
|
- }
|
|
- break;
|
|
case MR_EVT_LD_CREATED:
|
|
if (!instance->requestorId ||
|
|
- megasas_get_ld_vf_affiliation(instance, 0)) {
|
|
- if (megasas_ld_list_query(instance,
|
|
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
|
|
- megasas_get_ld_list(instance);
|
|
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
|
|
- for (j = 0;
|
|
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
|
|
- j++) {
|
|
- ld_index =
|
|
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
-
|
|
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
-
|
|
- if (instance->ld_ids[ld_index]
|
|
- != 0xff) {
|
|
- if (!sdev1)
|
|
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
- }
|
|
- if (sdev1)
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
- }
|
|
- doscan = 0;
|
|
- }
|
|
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
|
|
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
|
|
+
|
|
+ if (dcmd_ret == 0)
|
|
+ doscan = SCAN_VD_CHANNEL;
|
|
+
|
|
break;
|
|
+
|
|
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
|
|
case MR_EVT_FOREIGN_CFG_IMPORTED:
|
|
case MR_EVT_LD_STATE_CHANGE:
|
|
- doscan = 1;
|
|
+ dcmd_ret = megasas_get_pd_list(instance);
|
|
+
|
|
+ if (dcmd_ret != 0)
|
|
+ break;
|
|
+
|
|
+ if (!instance->requestorId ||
|
|
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
|
|
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
|
|
+
|
|
+ if (dcmd_ret != 0)
|
|
+ break;
|
|
+
|
|
+ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
|
|
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
|
|
+ instance->host->host_no);
|
|
break;
|
|
+
|
|
case MR_EVT_CTRL_PROP_CHANGED:
|
|
- megasas_get_ctrl_info(instance);
|
|
- break;
|
|
+ dcmd_ret = megasas_get_ctrl_info(instance);
|
|
+ break;
|
|
default:
|
|
doscan = 0;
|
|
break;
|
|
}
|
|
} else {
|
|
dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
|
|
+ mutex_unlock(&instance->reset_mutex);
|
|
kfree(ev);
|
|
return;
|
|
}
|
|
|
|
- if (doscan) {
|
|
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
|
|
- instance->host->host_no);
|
|
- if (megasas_get_pd_list(instance) == 0) {
|
|
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
|
|
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
|
|
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
|
|
- sdev1 = scsi_device_lookup(host, i, j, 0);
|
|
- if (instance->pd_list[pd_index].driveState ==
|
|
- MR_PD_STATE_SYSTEM) {
|
|
- if (!sdev1) {
|
|
- scsi_add_device(host, i, j, 0);
|
|
- }
|
|
- if (sdev1)
|
|
- scsi_device_put(sdev1);
|
|
- } else {
|
|
- if (sdev1) {
|
|
- scsi_remove_device(sdev1);
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
+ mutex_unlock(&instance->reset_mutex);
|
|
+
|
|
+ if (doscan & SCAN_PD_CHANNEL) {
|
|
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
|
|
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
|
|
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
|
|
+ sdev1 = scsi_device_lookup(host, i, j, 0);
|
|
+ if (instance->pd_list[pd_index].driveState ==
|
|
+ MR_PD_STATE_SYSTEM) {
|
|
+ if (!sdev1)
|
|
+ scsi_add_device(host, i, j, 0);
|
|
+ else
|
|
+ scsi_device_put(sdev1);
|
|
+ } else {
|
|
+ if (sdev1) {
|
|
+ scsi_remove_device(sdev1);
|
|
+ scsi_device_put(sdev1);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
+ }
|
|
|
|
- if (!instance->requestorId ||
|
|
- megasas_get_ld_vf_affiliation(instance, 0)) {
|
|
- if (megasas_ld_list_query(instance,
|
|
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
|
|
- megasas_get_ld_list(instance);
|
|
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
|
|
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
|
|
- j++) {
|
|
- ld_index =
|
|
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
-
|
|
- sdev1 = scsi_device_lookup(host,
|
|
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
- if (instance->ld_ids[ld_index]
|
|
- != 0xff) {
|
|
- if (!sdev1)
|
|
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
- else
|
|
- scsi_device_put(sdev1);
|
|
- } else {
|
|
- if (sdev1) {
|
|
- scsi_remove_device(sdev1);
|
|
- scsi_device_put(sdev1);
|
|
- }
|
|
+ if (doscan & SCAN_VD_CHANNEL) {
|
|
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
|
|
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
|
|
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
|
|
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
+ if (instance->ld_ids[ld_index] != 0xff) {
|
|
+ if (!sdev1)
|
|
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
|
|
+ else
|
|
+ scsi_device_put(sdev1);
|
|
+ } else {
|
|
+ if (sdev1) {
|
|
+ scsi_remove_device(sdev1);
|
|
+ scsi_device_put(sdev1);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
- if (instance->aen_cmd != NULL) {
|
|
- kfree(ev);
|
|
- return ;
|
|
- }
|
|
-
|
|
- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
|
|
+ if (dcmd_ret == 0)
|
|
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
|
|
+ else
|
|
+ seq_num = instance->last_seq_num;
|
|
|
|
/* Register AEN with FW for latest sequence number plus 1 */
|
|
class_locale.members.reserved = 0;
|
|
class_locale.members.locale = MR_EVT_LOCALE_ALL;
|
|
class_locale.members.class = MR_EVT_CLASS_DEBUG;
|
|
- mutex_lock(&instance->aen_mutex);
|
|
+
|
|
+ if (instance->aen_cmd != NULL) {
|
|
+ kfree(ev);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&instance->reset_mutex);
|
|
error = megasas_register_aen(instance, seq_num,
|
|
class_locale.word);
|
|
- mutex_unlock(&instance->aen_mutex);
|
|
-
|
|
if (error)
|
|
- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
|
|
+ dev_err(&instance->pdev->dev,
|
|
+ "register aen failed error %x\n", error);
|
|
|
|
+ mutex_unlock(&instance->reset_mutex);
|
|
kfree(ev);
|
|
}
|
|
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
index 4f391e747be2..021b994fdae8 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
@@ -201,6 +201,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
|
|
&instance->reg_set->inbound_low_queue_port);
|
|
writel(le32_to_cpu(req_desc->u.high),
|
|
&instance->reg_set->inbound_high_queue_port);
|
|
+ mmiowb();
|
|
spin_unlock_irqrestore(&instance->hba_lock, flags);
|
|
#endif
|
|
}
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
index 356233f86064..5b2c37f1e908 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
@@ -2020,8 +2020,10 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
|
|
_base_free_irq(ioc);
|
|
_base_disable_msix(ioc);
|
|
|
|
- if (ioc->msix96_vector)
|
|
+ if (ioc->msix96_vector) {
|
|
kfree(ioc->replyPostRegisterIndex);
|
|
+ ioc->replyPostRegisterIndex = NULL;
|
|
+ }
|
|
|
|
if (ioc->chip_phys) {
|
|
iounmap(ioc->chip);
|
|
@@ -2240,6 +2242,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
|
|
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
|
|
}
|
|
|
|
+static inline u8
|
|
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
|
|
+{
|
|
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
|
|
+}
|
|
+
|
|
/**
|
|
* mpt3sas_base_get_smid - obtain a free smid from internal queue
|
|
* @ioc: per adapter object
|
|
@@ -2300,6 +2308,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
|
|
request->scmd = scmd;
|
|
request->cb_idx = cb_idx;
|
|
smid = request->smid;
|
|
+ request->msix_io = _base_get_msix_index(ioc);
|
|
list_del(&request->tracker_list);
|
|
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
|
return smid;
|
|
@@ -2422,12 +2431,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
|
|
}
|
|
#endif
|
|
|
|
-static inline u8
|
|
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
|
|
-{
|
|
- return ioc->cpu_msix_table[raw_smp_processor_id()];
|
|
-}
|
|
-
|
|
/**
|
|
* mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
|
|
* @ioc: per adapter object
|
|
@@ -2481,18 +2484,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
* mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
|
|
* @ioc: per adapter object
|
|
* @smid: system request message index
|
|
- *
|
|
+ * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
|
|
* Return nothing.
|
|
*/
|
|
void
|
|
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
|
|
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
|
|
+ u16 msix_task)
|
|
{
|
|
Mpi2RequestDescriptorUnion_t descriptor;
|
|
u64 *request = (u64 *)&descriptor;

descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.MSIxIndex = msix_task;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 5ad271efbd45..92648a5ea2d2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -643,6 +643,7 @@ struct chain_tracker {
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
+ * @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
@@ -651,6 +652,7 @@ struct scsiio_tracker {
u8 direct_io;
struct list_head chain_list;
struct list_head tracker_list;
+ u16 msix_io;
};

/**
@@ -1213,7 +1215,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 msix_task);
void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d8366b056b70..4ccde5a05b70 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -817,7 +817,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9ab77b06434d..6180f7970bbf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2193,6 +2193,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u16 msix_task = 0;

if (m_type == TM_MUTEX_ON)
mutex_lock(&ioc->tm_cmds.mutex);
@@ -2256,7 +2257,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
+ (scsi_lookup->msix_io < ioc->reply_queue_count))
+ msix_task = scsi_lookup->msix_io;
+ else
+ msix_task = 0;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -3151,7 +3157,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
@@ -3332,7 +3338,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}

/**
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 75514a15bea0..f57d96984ae4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1578,7 +1578,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 9096d311e45d..c2d9b793759d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -631,8 +631,6 @@ struct ll_file_data {

struct lov_stripe_md;

-extern spinlock_t inode_lock;
-
extern struct dentry *llite_root;
extern struct kset *llite_kset;

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 29cfc57d496e..e4110d6de0b5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
+ struct iovec tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -557,7 +557,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);

- iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -1054,7 +1054,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = &vq->iov[out];
+ cmd->tvc_resp_iov = vq->iov[out];
cmd->tvc_in_iovs = in;

pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5e5db3687e34..353f4bae658c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1526,27 +1526,24 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,

reserve_bytes = num_pages << PAGE_CACHE_SHIFT;

- if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) {
- ret = check_can_nocow(inode, pos, &write_bytes);
- if (ret < 0)
- break;
- if (ret > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
- reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- goto reserve_metadata;
- }
+ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_PREALLOC)) &&
+ check_can_nocow(inode, pos, &write_bytes) > 0) {
+ /*
+ * For nodata cow case, no need to reserve
+ * data space.
+ */
+ only_release_metadata = true;
+ /*
+ * our prealloc extent may be smaller than
+ * write_bytes, so scale down.
+ */
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_CACHE_SIZE);
+ reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+ goto reserve_metadata;
}
+
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0)
break;
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 11309683d65f..27794b137b24 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
.sb = inode->i_sb,
};
lower_file = ecryptfs_file_to_lower(file);
- lower_file->f_pos = ctx->pos;
rc = iterate_dir(lower_file, &buf.ctx);
ctx->pos = buf.ctx.pos;
if (rc < 0)
@@ -236,14 +235,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
- if (d_is_dir(ecryptfs_dentry)) {
- ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
- mutex_lock(&crypt_stat->cs_mutex);
- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
- mutex_unlock(&crypt_stat->cs_mutex);
- rc = 0;
- goto out;
- }
rc = read_or_initialize_metadata(ecryptfs_dentry);
if (rc)
goto out_put;
@@ -260,6 +251,45 @@ out:
return rc;
}

+/**
+ * ecryptfs_dir_open
+ * @inode: inode speciying file to open
+ * @file: Structure to return filled in
+ *
+ * Opens the file specified by inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_dir_open(struct inode *inode, struct file *file)
+{
+ struct dentry *ecryptfs_dentry = file->f_path.dentry;
+ /* Private value of ecryptfs_dentry allocated in
+ * ecryptfs_lookup() */
+ struct ecryptfs_file_info *file_info;
+ struct file *lower_file;
+
+ /* Released in ecryptfs_release or end of function if failure */
+ file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+ ecryptfs_set_file_private(file, file_info);
+ if (unlikely(!file_info)) {
+ ecryptfs_printk(KERN_ERR,
+ "Error attempting to allocate memory\n");
+ return -ENOMEM;
+ }
+ lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
+ file->f_flags, current_cred());
+ if (IS_ERR(lower_file)) {
+ printk(KERN_ERR "%s: Error attempting to initialize "
+ "the lower file for the dentry with name "
+ "[%pd]; rc = [%ld]\n", __func__,
+ ecryptfs_dentry, PTR_ERR(lower_file));
+ kmem_cache_free(ecryptfs_file_info_cache, file_info);
+ return PTR_ERR(lower_file);
+ }
+ ecryptfs_set_file_lower(file, lower_file);
+ return 0;
+}
+
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
@@ -280,6 +310,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
return 0;
}

+static int ecryptfs_dir_release(struct inode *inode, struct file *file)
+{
+ fput(ecryptfs_file_to_lower(file));
+ kmem_cache_free(ecryptfs_file_info_cache,
+ ecryptfs_file_to_private(file));
+ return 0;
+}
+
+static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
+}
+
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
@@ -359,20 +402,16 @@ const struct file_operations ecryptfs_dir_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .open = ecryptfs_open,
- .flush = ecryptfs_flush,
- .release = ecryptfs_release,
+ .open = ecryptfs_dir_open,
+ .release = ecryptfs_dir_release,
.fsync = ecryptfs_fsync,
- .fasync = ecryptfs_fasync,
- .splice_read = generic_file_splice_read,
- .llseek = default_llseek,
+ .llseek = ecryptfs_dir_llseek,
};

const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek,
.read_iter = ecryptfs_read_update_atime,
.write_iter = generic_file_write_iter,
- .iterate = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9a5ad0f0d3ed..28702932a908 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -51,25 +51,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- __u16 csum_lo;
- __u16 csum_hi = 0;
__u32 csum;
+ __u16 dummy_csum = 0;
+ int offset = offsetof(struct ext4_inode, i_checksum_lo);
+ unsigned int csum_size = sizeof(dummy_csum);

- csum_lo = le16_to_cpu(raw->i_checksum_lo);
- raw->i_checksum_lo = 0;
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum_hi = le16_to_cpu(raw->i_checksum_hi);
- raw->i_checksum_hi = 0;
- }
+ csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_GOOD_OLD_INODE_SIZE - offset);

- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
- EXT4_INODE_SIZE(inode->i_sb));
-
- raw->i_checksum_lo = cpu_to_le16(csum_lo);
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
- raw->i_checksum_hi = cpu_to_le16(csum_hi);
+ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ offset = offsetof(struct ext4_inode, i_checksum_hi);
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw +
+ EXT4_GOOD_OLD_INODE_SIZE,
+ offset - EXT4_GOOD_OLD_INODE_SIZE);
+ if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) -
+ offset);
+ }
+ }

return csum;
}
@@ -5186,8 +5192,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
- ext4_set_inode_state(inode,
- EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 91bf36f22dbf..38eb0c8e43b9 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -420,15 +420,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
- __le32 save_csum;
int size;
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct dx_tail, dt_checksum);

size = count_offset + (count * sizeof(struct dx_entry));
- save_csum = t->dt_checksum;
- t->dt_checksum = 0;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
- csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
- t->dt_checksum = save_csum;
+ csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));

return cpu_to_le32(csum);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c542ebcf7a92..5bab28caa9d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2030,23 +2030,25 @@ failed:
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
struct ext4_group_desc *gdp)
{
- int offset;
+ int offset = offsetof(struct ext4_group_desc, bg_checksum);
__u16 crc = 0;
__le32 le_group = cpu_to_le32(block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);

if (ext4_has_metadata_csum(sbi->s_sb)) {
/* Use new metadata_csum algorithm */
- __le16 save_csum;
__u32 csum32;
+ __u16 dummy_csum = 0;

- save_csum = gdp->bg_checksum;
- gdp->bg_checksum = 0;
csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
sizeof(le_group));
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
- sbi->s_desc_size);
- gdp->bg_checksum = save_csum;
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
+ sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ if (offset < sbi->s_desc_size)
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
+ sbi->s_desc_size - offset);

crc = csum32 & 0xFFFF;
goto out;
@@ -2056,8 +2058,6 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
if (!ext4_has_feature_gdt_csum(sb))
return 0;

- offset = offsetof(struct ext4_group_desc, bg_checksum);
-
crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
crc = crc16(crc, (__u8 *)gdp, offset);
@@ -2093,6 +2093,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
+ ext4_fsblk_t sb_block,
ext4_group_t *first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2123,6 +2124,11 @@ static int ext4_check_descriptors(struct super_block *sb,
grp = i;

block_bitmap = ext4_block_bitmap(sb, gdp);
+ if (block_bitmap == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Block bitmap for group %u overlaps "
+ "superblock", i);
+ }
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
@@ -2130,6 +2136,11 @@ static int ext4_check_descriptors(struct super_block *sb,
return 0;
}
inode_bitmap = ext4_inode_bitmap(sb, gdp);
+ if (inode_bitmap == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode bitmap for group %u overlaps "
+ "superblock", i);
+ }
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
@@ -2137,6 +2148,11 @@ static int ext4_check_descriptors(struct super_block *sb,
return 0;
}
inode_table = ext4_inode_table(sb, gdp);
+ if (inode_table == sb_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode table for group %u overlaps "
+ "superblock", i);
+ }
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3640,7 +3656,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
- if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
+ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 6b6b3e751f8c..263002f0389d 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
- __le32 save_csum;
__le64 dsk_block_nr = cpu_to_le64(block_nr);
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct ext4_xattr_header, h_checksum);

- save_csum = hdr->h_checksum;
- hdr->h_checksum = 0;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
sizeof(dsk_block_nr));
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
- EXT4_BLOCK_SIZE(inode->i_sb));
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
+ EXT4_BLOCK_SIZE(inode->i_sb) - offset);

- hdr->h_checksum = save_csum;
return cpu_to_le32(csum);
}

@@ -1264,15 +1265,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
size_t min_offs, free;
int total_ino;
void *base, *start, *end;
- int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+ int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+ int isize_diff; /* How much do we need to grow i_extra_isize */

down_write(&EXT4_I(inode)->xattr_sem);
+ /*
+ * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+ */
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
- if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
- up_write(&EXT4_I(inode)->xattr_sem);
- return 0;
- }
+ isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+ goto out;

header = IHDR(inode, raw_inode);
entry = IFIRST(header);
@@ -1289,7 +1294,7 @@ retry:
total_ino = sizeof(struct ext4_xattr_ibody_header);

free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
- if (free >= new_extra_isize) {
+ if (free >= isize_diff) {
entry = IFIRST(header);
ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
- new_extra_isize, (void *)raw_inode +
@@ -1297,8 +1302,7 @@ retry:
(void *)header, total_ino,
inode->i_sb->s_blocksize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
- error = 0;
- goto cleanup;
+ goto out;
}

/*
@@ -1321,7 +1325,7 @@ retry:
end = bh->b_data + bh->b_size;
min_offs = end - base;
free = ext4_xattr_free_space(first, &min_offs, base, NULL);
- if (free < new_extra_isize) {
+ if (free < isize_diff) {
if (!tried_min_extra_isize && s_min_extra_isize) {
tried_min_extra_isize++;
new_extra_isize = s_min_extra_isize;
@@ -1335,7 +1339,7 @@ retry:
free = inode->i_sb->s_blocksize;
}

- while (new_extra_isize > 0) {
+ while (isize_diff > 0) {
size_t offs, size, entry_size;
struct ext4_xattr_entry *small_entry = NULL;
struct ext4_xattr_info i = {
@@ -1366,7 +1370,7 @@ retry:
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
if (total_size <= free && total_size < min_total_size) {
- if (total_size < new_extra_isize) {
+ if (total_size < isize_diff) {
small_entry = last;
} else {
entry = last;
@@ -1421,22 +1425,22 @@ retry:
error = ext4_xattr_ibody_set(handle, inode, &i, is);
if (error)
goto cleanup;
+ total_ino -= entry_size;

entry = IFIRST(header);
- if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
- shift_bytes = new_extra_isize;
+ if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
+ shift_bytes = isize_diff;
else
- shift_bytes = entry_size + size;
+ shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
/* Adjust the offsets and shift the remaining entries ahead */
- ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
- shift_bytes, (void *)raw_inode +
- EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
- (void *)header, total_ino - entry_size,
- inode->i_sb->s_blocksize);
+ ext4_xattr_shift_entries(entry, -shift_bytes,
+ (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize + shift_bytes,
+ (void *)header, total_ino, inode->i_sb->s_blocksize);

- extra_isize += shift_bytes;
- new_extra_isize -= shift_bytes;
- EXT4_I(inode)->i_extra_isize = extra_isize;
+ isize_diff -= shift_bytes;
+ EXT4_I(inode)->i_extra_isize += shift_bytes;
+ header = IHDR(inode, raw_inode);

i.name = b_entry_name;
i.value = buffer;
@@ -1458,6 +1462,8 @@ retry:
kfree(bs);
}
brelse(bh);
+out:
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
return 0;

@@ -1469,6 +1475,10 @@ cleanup:
kfree(is);
kfree(bs);
brelse(bh);
+ /*
+ * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+ * size expansion failed.
+ */
up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
diff --git a/fs/namei.c b/fs/namei.c
index 209ca7737cb2..0b0acba72a71 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -887,6 +887,7 @@ static inline int may_follow_link(struct nameidata *nd)
{
const struct inode *inode;
const struct inode *parent;
+ kuid_t puid;

if (!sysctl_protected_symlinks)
return 0;
@@ -902,7 +903,8 @@ static inline int may_follow_link(struct nameidata *nd)
return 0;

/* Allowed if parent directory and link owner match. */
- if (uid_eq(parent->i_uid, inode->i_uid))
+ puid = parent->i_uid;
+ if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
return 0;

if (nd->flags & LOOKUP_RCU)
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index eff6319d5037..9e52609cd683 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -48,6 +48,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
}

for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+ if (ovl_is_private_xattr(name))
+ continue;
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 4f729ffff75d..220b04f04523 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -219,7 +219,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
}


-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
{
return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}
@@ -277,7 +277,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
struct path realpath;
enum ovl_path_type type = ovl_path_real(dentry, &realpath);
ssize_t res;
- int off;
+ size_t len;
+ char *s;

res = vfs_listxattr(realpath.dentry, list, size);
if (res <= 0 || size == 0)
@@ -287,17 +288,19 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return res;

/* filter out private xattrs */
- for (off = 0; off < res;) {
- char *s = list + off;
- size_t slen = strlen(s) + 1;
+ for (s = list, len = res; len;) {
+ size_t slen = strnlen(s, len) + 1;

- BUG_ON(off + slen > res);
+ /* underlying fs providing us with an broken xattr list? */
+ if (WARN_ON(slen > len))
+ return -EIO;

+ len -= slen;
if (ovl_is_private_xattr(s)) {
res -= slen;
- memmove(s, s + slen, res - off);
+ memmove(s, s + slen, len);
} else {
- off += slen;
+ s += slen;
}
}

diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 735e1d49b301..c319d5eaabcf 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -174,6 +174,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
int ovl_removexattr(struct dentry *dentry, const char *name);
struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
+bool ovl_is_private_xattr(const char *name);

struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
struct ovl_entry *oe);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 70a7bbe199d0..d70208c0de84 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -763,6 +763,10 @@ retry:
struct kstat stat = {
.mode = S_IFDIR | 0,
};
+ struct iattr attr = {
+ .ia_valid = ATTR_MODE,
+ .ia_mode = stat.mode,
+ };

if (work->d_inode) {
err = -EEXIST;
@@ -778,6 +782,21 @@ retry:
err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
if (err)
goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ /* Clear any inherited mode bits */
+ inode_lock(work->d_inode);
+ err = notify_change(work, &attr, NULL);
+ inode_unlock(work->d_inode);
+ if (err)
+ goto out_dput;
}
out_unlock:
mutex_unlock(&dir->i_mutex);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f6478301db00..d598b9c809c1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -248,23 +248,29 @@ static int do_maps_open(struct inode *inode, struct file *file,
sizeof(struct proc_maps_private));
}

-static pid_t pid_of_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, bool is_pid)
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static int is_stack(struct proc_maps_private *priv,
+ struct vm_area_struct *vma, int is_pid)
{
- struct inode *inode = priv->inode;
- struct task_struct *task;
- pid_t ret = 0;
+ int stack = 0;
+
+ if (is_pid) {
+ stack = vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+ } else {
+ struct inode *inode = priv->inode;
+ struct task_struct *task;

- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task) {
- task = task_of_stack(task, vma, is_pid);
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task)
- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+ stack = vma_is_stack_for_task(vma, task);
+ rcu_read_unlock();
}
- rcu_read_unlock();
-
- return ret;
+ return stack;
}

static void
@@ -324,8 +330,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)

name = arch_vma_name(vma);
if (!name) {
- pid_t tid;
-
if (!mm) {
name = "[vdso]";
goto done;
@@ -391,22 +391,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
goto done;
}

- tid = pid_of_stack(priv, vma, is_pid);
- if (tid != 0) {
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack)) {
- name = "[stack]";
- } else {
- /* Thread stack in /proc/PID/maps */
- seq_pad(m, ' ');
- seq_printf(m, "[stack:%d]", tid);
- }
- goto done;
- }
+ if (is_stack(priv, vma, is_pid))
+ name = "[stack]";

if (vma_get_anon_name(vma)) {
seq_pad(m, ' ');

@@ -1566,19 +1557,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_file_path(m, file, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
- } else {
- pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
- if (tid != 0) {
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack))
- seq_puts(m, " stack");
- else
- seq_printf(m, " stack:%d", tid);
- }
+ } else if (is_stack(proc_priv, vma, is_pid)) {
+ seq_puts(m, " stack");
}

if (is_vm_hugetlb_page(vma))
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index e0d64c92e4f6..faacb0c0d857 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm,
return size;
}

-static pid_t pid_of_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, bool is_pid)
+static int is_stack(struct proc_maps_private *priv,
+ struct vm_area_struct *vma, int is_pid)
{
- struct inode *inode = priv->inode;
- struct task_struct *task;
- pid_t ret = 0;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task) {
- task = task_of_stack(task, vma, is_pid);
+ struct mm_struct *mm = vma->vm_mm;
+ int stack = 0;
+
+ if (is_pid) {
+ stack = vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack;
+ } else {
+ struct inode *inode = priv->inode;
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task)
- ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+ stack = vma_is_stack_for_task(vma, task);
+ rcu_read_unlock();
}
- rcu_read_unlock();
-
- return ret;
+ return stack;
}

/*
@@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "");
- } else if (mm) {
- pid_t tid = pid_of_stack(priv, vma, is_pid);
-
- if (tid != 0) {
- seq_pad(m, ' ');
- /*
- * Thread stack in /proc/PID/task/TID/maps or
- * the main process stack.
- */
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack))
- seq_printf(m, "[stack]");
- else
- seq_printf(m, "[stack:%d]", tid);
- }
+ } else if (mm && is_stack(priv, vma, is_pid)) {
+ seq_pad(m, ' ');
+ seq_printf(m, "[stack]");
}

seq_putc(m, '\n');
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index b45345d701e7..51157da3f76e 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)

p = c->gap_lebs;
do {
- ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
+ ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 8a53eaa349f4..7088be6afb3c 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -581,7 +581,8 @@ xfs_sb_verify(
* Only check the in progress field for the primary superblock as
* mkfs.xfs doesn't clear it from secondary superblocks.
*/
- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+ return xfs_mount_validate_sb(mp, &sb,
+ bp->b_maps[0].bm_bn == XFS_SB_DADDR,
check_version);
}

diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 17c445612e01..2cdc723d750f 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -277,7 +277,9 @@
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */

#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \

@@ -289,6 +291,8 @@
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
- INTEL_VGA_DEVICE(0x5A84, info)
+ INTEL_VGA_DEVICE(0x1A85, info), \
+ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */

#endif /* _I915_PCIIDS_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 168755791ec8..fe14382f9664 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -890,7 +890,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;

- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;

if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
diff --git a/include/linux/capability.h b/include/linux/capability.h
index af9f0b9e80e6..5f8249d378a2 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -214,6 +214,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
+extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
#else
static inline bool has_capability(struct task_struct *t, int cap)
{
@@ -241,6 +242,10 @@ static inline bool ns_capable(struct user_namespace *ns, int cap)
{
return true;
}
+static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
+{
+ return true;
+}
#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ab3d8d9bb3ef..0166582c4d78 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -710,6 +710,31 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT2,
};

+static inline void inode_lock(struct inode *inode)
+{
+ mutex_lock(&inode->i_mutex);
+}
+
+static inline void inode_unlock(struct inode *inode)
+{
+ mutex_unlock(&inode->i_mutex);
+}
+
+static inline int inode_trylock(struct inode *inode)
+{
+ return mutex_trylock(&inode->i_mutex);
+}
+
+static inline int inode_is_locked(struct inode *inode)
+{
+ return mutex_is_locked(&inode->i_mutex);
+}
+
+static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
+{
+ mutex_lock_nested(&inode->i_mutex, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);

@@ -3029,8 +3054,8 @@ static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
}
static inline bool dir_relax(struct inode *inode)
{
- mutex_unlock(&inode->i_mutex);
- mutex_lock(&inode->i_mutex);
+ inode_unlock(inode);
+ inode_lock(inode);
return !IS_DEADDIR(inode);
}

diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 034117b3be5f..f09648d14694 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -58,8 +58,9 @@ enum {
/* Block Types */
NVM_BLK_T_FREE = 0x0,
NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_DEV = 0x2,
- NVM_BLK_T_HOST = 0x4,
+ NVM_BLK_T_GRWN_BAD = 0x2,
+ NVM_BLK_T_DEV = 0x4,
+ NVM_BLK_T_HOST = 0x8,
};

struct nvm_id_group {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f24df9c0b9df..8a761248d01e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1311,8 +1311,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}

-extern struct task_struct *task_of_stack(struct task_struct *task,
- struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a48f16a..6cdd50f7f52d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
+ int is_group_event;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
diff --git a/include/linux/time.h b/include/linux/time.h
index beebe3a02d43..297f09f23896 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv)

extern struct timespec timespec_trunc(struct timespec t, unsigned gran);

+/*
+ * Validates if a timespec/timeval used to inject a time offset is valid.
+ * Offsets can be postive or negative. The value of the timeval/timespec
+ * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
+ * always be non-negative.
+ */
+static inline bool timeval_inject_offset_valid(const struct timeval *tv)
+{
+ /* We don't check the tv_sec as it can be positive or negative */
+
+ /* Can't have more microseconds then a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec_inject_offset_valid(const struct timespec *ts)
+{
+ /* We don't check the tv_sec as it can be positive or negative */
+
+ /* Can't have more nanoseconds then a second */
+ if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })

diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index e4c0a35d6417..e347b24ef9fb 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool {
#define HV_INVALIDARG 0x80070057
#define HV_GUID_NOTFOUND 0x80041002
#define HV_ERROR_ALREADY_EXISTS 0x80070050
+#define HV_ERROR_DISK_FULL 0x80070070

#define ADDR_FAMILY_NONE 0x00
#define ADDR_FAMILY_IPV4 0x01
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index a0e87d16b726..421d27413731 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -621,6 +621,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
+#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
+#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
+#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */

/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 831351b2e660..2302f3ce5f86 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -31,6 +31,16 @@ struct dk_cxlflash_hdr {
};

/*
+ * Return flag definitions available to all ioctls
+ *
+ * Similar to the input flags, these are grown from the bottom-up with the
+ * intention that ioctl-specific return flag definitions would grow from the
+ * top-down, allowing the two sets to co-exist. While not required/enforced
+ * at this time, this provides future flexibility.
+ */
+#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
+
+/*
* Notes:
* -----
* The 'context_id' field of all ioctl structures contains the context
diff --git a/kernel/capability.c b/kernel/capability.c
index 45432b54d5c6..00411c82dac5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -361,6 +361,24 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
return has_ns_capability_noaudit(t, &init_user_ns, cap);
}

+static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
+{
+ int capable;
+
+ if (unlikely(!cap_valid(cap))) {
+ pr_crit("capable() called with invalid cap=%u\n", cap);
+ BUG();
+ }
+
+ capable = audit ? security_capable(current_cred(), ns, cap) :
+ security_capable_noaudit(current_cred(), ns, cap);
+ if (capable == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return true;
+ }
+ return false;
+}
+
/**
* ns_capable - Determine if the current task has a superior capability in effect
* @ns: The usernamespace we want the capability in
@@ -374,19 +392,27 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
*/
bool ns_capable(struct user_namespace *ns, int cap)
{
- if (unlikely(!cap_valid(cap))) {
- pr_crit("capable() called with invalid cap=%u\n", cap);
- BUG();
- }
-
- if (security_capable(current_cred(), ns, cap) == 0) {
- current->flags |= PF_SUPERPRIV;
- return true;
- }
- return false;
+ return ns_capable_common(ns, cap, true);
}
EXPORT_SYMBOL(ns_capable);

+/**
+ * ns_capable_noaudit - Determine if the current task has a superior capability
+ * (unaudited) in effect
+ * @ns: The usernamespace we want the capability in
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+bool ns_capable_noaudit(struct user_namespace *ns, int cap)
+{
+ return ns_capable_common(ns, cap, false);
+}
+EXPORT_SYMBOL(ns_capable_noaudit);

/**
* capable - Determine if the current task has a superior capability in effect
diff --git a/kernel/cred.c b/kernel/cred.c
index 71179a09c1d6..ff8606f77d90 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -689,6 +689,8 @@ EXPORT_SYMBOL(set_security_override_from_ctx);
*/
int set_create_files_as(struct cred *new, struct inode *inode)
{
+ if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
+ return -EINVAL;
new->fsuid = inode->i_uid;
new->fsgid = inode->i_gid;
return security_kernel_create_files_as(new, inode);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7dad84913abf..da0c09ff6112 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -171,8 +171,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(page, mm, addr, &ptl, 0);
- if (!ptep)
+ if (!ptep) {
+ mem_cgroup_cancel_charge(kpage, memcg);
goto unlock;
+ }

get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr);
@@ -199,7 +201,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

err = 0;
unlock:
- mem_cgroup_cancel_charge(kpage, memcg);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(page);
return err;
diff --git a/kernel/fork.c b/kernel/fork.c
index 1155eac61687..c485cb156772 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1369,7 +1369,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
- threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1521,6 +1520,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;

+ threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted the the new process's css_set can be changed
@@ -1621,6 +1621,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
bad_fork_cancel_cgroup:
cgroup_cancel_fork(p, cgrp_ss_priv);
bad_fork_free_pid:
+ threadgroup_change_end(current);
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
@@ -1651,7 +1652,6 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
- threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b8b516c37bf1..8f258f437ac2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1191,8 +1191,6 @@ static void task_numa_assign(struct task_numa_env *env,
{
if (env->best_task)
put_task_struct(env->best_task);
- if (p)
- get_task_struct(p);

env->best_task = p;
env->best_imp = imp;
@@ -1260,20 +1258,30 @@ static void task_numa_compare(struct task_numa_env *env,
long imp = env->p->numa_group ? groupimp : taskimp;
long moveimp = imp;
int dist = env->dist;
+ bool assigned = false;

rcu_read_lock();

raw_spin_lock_irq(&dst_rq->lock);
cur = dst_rq->curr;
/*
- * No need to move the exiting task, and this ensures that ->curr
- * wasn't reaped and thus get_task_struct() in task_numa_assign()
- * is safe under RCU read lock.
- * Note that rcu_read_lock() itself can't protect from the final
- * put_task_struct() after the last schedule().
+ * No need to move the exiting task or idle task.
*/
if ((cur->flags & PF_EXITING) || is_idle_task(cur))
cur = NULL;
+ else {
+ /*
+ * The task_struct must be protected here to protect the
+ * p->numa_faults access in the task_weight since the
+ * numa_faults could already be freed in the following path:
+ * finish_task_switch()
+ * --> put_task_struct()
+ * --> __put_task_struct()
+ * --> task_numa_free()
+ */
+ get_task_struct(cur);
+ }
+
raw_spin_unlock_irq(&dst_rq->lock);

/*
@@ -1357,6 +1365,7 @@ balance:
*/
if (!load_too_imbalanced(src_load, dst_load, env)) {
imp = moveimp - 1;
+ put_task_struct(cur);
cur = NULL;
goto assign;
}
@@ -1382,9 +1391,16 @@ balance:
env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);

assign:
+ assigned = true;
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
+ /*
+ * The dst_rq->curr isn't assigned. The protection for task_struct is
+ * finished.
+ */
+ if (cur && !assigned)
+ put_task_struct(cur);
}

static void task_numa_find_cpu(struct task_numa_env *env,
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1347882d131e..b98810d2f3b4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
/* cs is a watchdog. */
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+ }
+ spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+ struct clocksource *cs, *old_wd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&watchdog_lock, flags);
+ /* save current watchdog */
+ old_wd = watchdog;
+ if (fallback)
+ watchdog = NULL;
+
+ list_for_each_entry(cs, &clocksource_list, list) {
+ /* cs is a clocksource to be watched. */
+ if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+ continue;
+
+ /* Skip current if we were requested for a fallback. */
+ if (fallback && cs == old_wd)
+ continue;
+
/* Pick the best watchdog. */
- if (!watchdog || cs->rating > watchdog->rating) {
+ if (!watchdog || cs->rating > watchdog->rating)
watchdog = cs;
- /* Reset watchdog cycles */
- clocksource_reset_watchdog();
- }
}
+ /* If we failed to find a fallback restore the old one. */
+ if (!watchdog)
+ watchdog = old_wd;
+
+ /* If we changed the watchdog we need to reset cycles. */
+ if (watchdog != old_wd)
+ clocksource_reset_watchdog();
+
/* Check if the watchdog timer needs to be started. */
clocksource_start_watchdog();
spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

+static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
clocksource_select();
+ clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
mutex_lock(&clocksource_mutex);
__clocksource_change_rating(cs, rating);
clocksource_select();
+ clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
*/
static int clocksource_unbind(struct clocksource *cs)
{
- /*
- * I really can't convince myself to support this on hardware
- * designed by lobotomized monkeys.
- */
- if (clocksource_is_watchdog(cs))
- return -EBUSY;
+ if (clocksource_is_watchdog(cs)) {
+ /* Select and try to install a replacement watchdog. */
+ clocksource_select_watchdog(true);
+ if (clocksource_is_watchdog(cs))
+ return -EBUSY;
+ }

if (cs == curr_clocksource) {
/* Select and try to install a replacement clock source */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index fa909f9fd559..17f7bcff1e02 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -94,6 +94,9 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+ /* Make sure we catch unsupported clockids */
+ [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
+
[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
@@ -102,7 +105,9 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
- return hrtimer_clock_to_base_table[clock_id];
+ int base = hrtimer_clock_to_base_table[clock_id];
+ BUG_ON(base == HRTIMER_MAX_CLOCK_BASES);
+ return base;
}

/*
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 149cc8086aea..ab861771e37f 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -674,8 +674,24 @@ int ntp_validate_timex(struct timex *txc)
return -EINVAL;
}

- if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
- return -EPERM;
+ if (txc->modes & ADJ_SETOFFSET) {
+ /* In order to inject time, you gotta be super-user! */
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if (txc->modes & ADJ_NANO) {
+ struct timespec ts;
+
+ ts.tv_sec = txc->time.tv_sec;
+ ts.tv_nsec = txc->time.tv_usec;
+ if (!timespec_inject_offset_valid(&ts))
+ return -EINVAL;
+
+ } else {
+ if (!timeval_inject_offset_valid(&txc->time))
+ return -EINVAL;
+ }
+ }

/*
* Check for potential multiplication overflows that can
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 99188ee5d9d0..4ff237dbc006 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -383,7 +383,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+ now = ktime_to_ns(tkr->base);
+
+ now += clocksource_delta(tkr->read(tkr->clock),
+ tkr->cycle_last, tkr->mask);
} while (read_seqcount_retry(&tkf->seq, seq));

return now;
@@ -958,7 +961,7 @@ int timekeeping_inject_offset(struct timespec *ts)
struct timespec64 ts64, tmp;
int ret = 0;

- if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ if (!timespec_inject_offset_valid(ts))
return -EINVAL;

ts64 = timespec_to_timespec64(*ts);
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index f6bd65236712..107310a6f36f 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -23,7 +23,9 @@

#include "timekeeping_internal.h"

-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};

static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
{
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);

void tk_debug_account_sleep_time(struct timespec64 *t)
{
- sleep_time_bin[fls(t->tv_sec)]++;
+ /* Cap bin index so we don't overflow the array */
+ int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+ sleep_time_bin[bin]++;
}

diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..554522934c44 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -74,7 +74,7 @@ next_tag:

/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:

/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;

if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
}

n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;

diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index e00ff00e861c..e37dbf53e226 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -367,7 +367,9 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
buf_len = sgl->length;
p2 = sg_virt(sgl);

- for (i = a->nlimbs - 1; i >= 0; i--) {
+ for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+ lzeros %= BYTES_PER_MPI_LIMB;
+ i >= 0; i--) {
alimb = a->d[i];
p = (u8 *)&alimb2;
#if BYTES_PER_MPI_LIMB == 4
@@ -388,17 +390,12 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
#error please implement for this limb size.
#endif
if (lzeros > 0) {
- if (lzeros >= sizeof(alimb)) {
- p -= sizeof(alimb);
- continue;
- } else {
- mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
- mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
- + lzeros;
- *limb1 = *limb2;
- p -= lzeros;
- y = lzeros;
- }
+ mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+ mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+ + lzeros;
+ *limb1 = *limb2;
+ p -= lzeros;
+ y = lzeros;
lzeros -= sizeof(alimb);
}

diff --git a/mm/util.c b/mm/util.c
index 9af1c12b310c..d5259b62f8d7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -199,36 +199,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
-				struct vm_area_struct *vma)
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
 {
 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the task_struct of the task
- * that the vma is stack for. Must be called under rcu_read_lock().
- */
-struct task_struct *task_of_stack(struct task_struct *task,
-				struct vm_area_struct *vma, bool in_group)
-{
-	if (vm_is_stack_for_task(task, vma))
-		return task;
-
-	if (in_group) {
-		struct task_struct *t;
-
-		for_each_thread(task, t) {
-			if (vm_is_stack_for_task(t, vma))
-				return t;
-		}
-	}
-
-	return NULL;
-}
-
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 44e1632370dd..0b1ea5abcc04 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1275,6 +1275,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
@@ -1300,11 +1301,12 @@ try_again:
 	 */
 
 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
 					    msg, copied);
 	else {
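Both UDP hunks (IPv4 here, IPv6 just below) remember the outcome of the expensive full-checksum verification in a local bool, so a datagram already verified on the truncated-read path is not verified a second time when it is copied out. A generic compute-once sketch of the pattern; every function here is an illustrative stub, not a kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool expensive_verify(const char *pkt) { return pkt[0] == 'v'; }
    static void copy_fast(const char *pkt)     { printf("fast copy: %s\n", pkt); }
    static void copy_checking(const char *pkt) { printf("checked copy: %s\n", pkt); }

    static void deliver(const char *pkt, bool need_early_check)
    {
        bool verified = false;

        if (need_early_check) {
            verified = expensive_verify(pkt);
            if (!verified)
                return;             /* drop, like the csum_copy_err path */
        }

        /* Reuse the cached result instead of verifying again. */
        if (verified)
            copy_fast(pkt);
        else
            copy_checking(pkt);
    }

    int main(void)
    {
        deliver("verified packet", true);
        deliver("plain packet", false);
        return 0;
    }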
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 275af43306f9..e6092bd72ee2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -402,6 +402,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	int is_udp4;
 	bool slow;
 
@@ -433,11 +434,12 @@ try_again:
 	 */
 
 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
 					    msg, copied);
 	else {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 25391fb25516..2fc6ca9d1286 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -897,6 +897,9 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	struct xt_table_info *info = NULL;
 	size_t sz = sizeof(*info) + size;
 
+	if (sz < sizeof(*info))
+		return NULL;
+
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
 	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
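The new check rejects a size that makes sizeof(*info) + size wrap around: unsigned addition wraps modulo 2^N, so a wrapped sum compares smaller than either operand. A standalone sketch of the idiom; struct header and alloc_with_header() are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct header { uint64_t meta[4]; };

    /* Allocate a header plus `size` payload bytes, refusing on overflow. */
    static void *alloc_with_header(size_t size)
    {
        size_t sz = sizeof(struct header) + size;

        /* A wrapped sum is smaller than sizeof(struct header): exactly
         * what the patch's `sz < sizeof(*info)` test detects. */
        if (sz < sizeof(struct header))
            return NULL;

        return malloc(sz);
    }

    int main(void)
    {
        void *ok = alloc_with_header(64);
        void *bad = alloc_with_header(SIZE_MAX);    /* wraps -> NULL */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
    }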
diff --git a/net/rds/recv.c b/net/rds/recv.c
index a00462b0d01d..0514af3ab378 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -545,5 +545,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
 		minfo.fport = inc->i_hdr.h_dport;
 	}
 
+	minfo.flags = 0;
+
 	rds_info_copy(iter, &minfo, sizeof(minfo));
 }
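Initializing minfo.flags before the copy-out stops four bytes of uninitialized kernel stack from reaching userspace. A userspace sketch of the defensive habit of zeroing the whole structure before filling it; struct sock_info and fill_info() are illustrative:

    #include <stdio.h>
    #include <string.h>

    struct sock_info {
        unsigned int   laddr, faddr;
        unsigned short lport, fport;
        unsigned int   flags;   /* the field the patch stopped leaking */
    };

    static void fill_info(struct sock_info *out)
    {
        /* Zero everything first, so padding and any field we forget to
         * set cannot carry stale stack bytes to the reader. */
        memset(out, 0, sizeof(*out));

        out->laddr = 0x7f000001;
        out->lport = 4000;
    }

    int main(void)
    {
        struct sock_info info;

        fill_info(&info);
        printf("flags=%u\n", info.flags);
        return 0;
    }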
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index ed98c1fc3de1..46a71c701e7c 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
 	kgid_t root_gid = make_kgid(net->user_ns, 0);
 
 	/* Allow network administrator to have same access as root. */
-	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
+	if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) ||
 	    uid_eq(root_uid, current_euid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 2ed732bfe94b..a0c90572d0e5 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -574,7 +574,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-	strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+	nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+		    TIPC_MAX_LINK_NAME);
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
 			    &link_info, sizeof(link_info));
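Replacing strcpy() with nla_strlcpy() bounds the copy of a peer-supplied link name to TIPC_MAX_LINK_NAME bytes including the terminator. Plain C gets the same guarantee from snprintf(), as in this sketch; MAX_LINK_NAME and struct link_info are illustrative stand-ins:

    #include <stdio.h>
    #include <string.h>

    #define MAX_LINK_NAME 60    /* illustrative bound, not the TIPC constant */

    struct link_info { char str[MAX_LINK_NAME]; };

    int main(void)
    {
        struct link_info info;
        const char *untrusted = "a-name-much-longer-than-the-destination-"
                                "buffer-can-possibly-hold-without-overflowing";

        /* snprintf() always NUL-terminates and never writes past the
         * bound, the guarantee nla_strlcpy() gives the patched code. */
        snprintf(info.str, sizeof(info.str), "%s", untrusted);

        printf("%zu bytes kept: %s\n", strlen(info.str), info.str);
        return 0;
    }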
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 69ee2eeef968..f9ff73a8d815 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 	if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
 		return tipc_conn_terminate(tn->topsrv, subscrb->conid);
 
-	tipc_nametbl_subscribe(sub);
+	if (sub)
+		tipc_nametbl_subscribe(sub);
 }
 
 /* Handle one request to establish a new subscriber */
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 795437b10082..b450a27588c8 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
 		return -EBUSY;
 	}
 	list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+	mutex_unlock(&register_mutex);
 	err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
 				  rmidi->card, rmidi->device,
 				  &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
 	if (err < 0) {
 		rmidi_err(rmidi, "unable to register\n");
+		mutex_lock(&register_mutex);
 		list_del(&rmidi->list);
 		mutex_unlock(&register_mutex);
 		return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
 	if (rmidi->ops && rmidi->ops->dev_register &&
 	    (err = rmidi->ops->dev_register(rmidi)) < 0) {
 		snd_unregister_device(&rmidi->dev);
+		mutex_lock(&register_mutex);
 		list_del(&rmidi->list);
 		mutex_unlock(&register_mutex);
 		return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
 		}
 	}
 #endif /* CONFIG_SND_OSSEMUL */
-	mutex_unlock(&register_mutex);
 	sprintf(name, "midi%d", rmidi->device);
 	entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
 	if (entry) {
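The rawmidi fix narrows the register_mutex critical sections: the list insertion is published under the lock, the lock is dropped across the device registration (which may block), and it is re-taken only to roll the insertion back on failure. A pthread sketch of the same lock-scope discipline; slow_register() and device_count are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int device_count;            /* stands in for the device list */

    /* Placeholder for a call that may block and must not run under the lock. */
    static int slow_register(void) { return 0; }

    static int dev_register(void)
    {
        int err;

        pthread_mutex_lock(&register_mutex);
        device_count++;                         /* publish */
        pthread_mutex_unlock(&register_mutex);  /* drop before blocking */

        err = slow_register();
        if (err < 0) {
            pthread_mutex_lock(&register_mutex);
            device_count--;                     /* roll back under the lock */
            pthread_mutex_unlock(&register_mutex);
        }
        return err;
    }

    int main(void)
    {
        printf("register: %d\n", dev_register());
        return 0;
    }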
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 637d034bb084..ae4ea2e2e7fe 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -296,8 +296,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
 		get_device(&timer->card->card_dev);
 	timeri->slave_class = tid->dev_sclass;
 	timeri->slave_id = slave_id;
-	if (list_empty(&timer->open_list_head) && timer->hw.open)
-		timer->hw.open(timer);
+
+	if (list_empty(&timer->open_list_head) && timer->hw.open) {
+		int err = timer->hw.open(timer);
+		if (err) {
+			kfree(timeri->owner);
+			kfree(timeri);
+
+			if (timer->card)
+				put_device(&timer->card->card_dev);
+			module_put(timer->module);
+			mutex_unlock(&register_mutex);
+			return err;
+		}
+	}
+
 	list_add_tail(&timeri->open_list, &timer->open_list_head);
 	snd_timer_check_master(timeri);
 	mutex_unlock(&register_mutex);
@@ -837,6 +850,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
 	timer->tmr_subdevice = tid->subdevice;
 	if (id)
 		strlcpy(timer->id, id, sizeof(timer->id));
+	timer->sticks = 1;
 	INIT_LIST_HEAD(&timer->device_list);
 	INIT_LIST_HEAD(&timer->open_list_head);
 	INIT_LIST_HEAD(&timer->active_list_head);
@@ -1967,6 +1981,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 		tu->qused--;
 		spin_unlock_irq(&tu->qlock);
 
+		mutex_lock(&tu->ioctl_lock);
 		if (tu->tread) {
 			if (copy_to_user(buffer, &tu->tqueue[qhead],
 					 sizeof(struct snd_timer_tread)))
@@ -1976,6 +1991,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 					 sizeof(struct snd_timer_read)))
 				err = -EFAULT;
 		}
+		mutex_unlock(&tu->ioctl_lock);
 
 		spin_lock_irq(&tu->qlock);
 		if (err < 0)
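The snd_timer_open() hunk finally checks the return value of the hw.open() callback and releases everything acquired so far, in reverse order, before propagating the error. A compact sketch of that unwind-on-failure shape; hw_open() is an illustrative hook forced to fail:

    #include <stdio.h>
    #include <stdlib.h>

    static int hw_open(void) { return -1; }     /* force the error path */

    static int timer_open(void)
    {
        void *instance = malloc(64);    /* resource taken before hw_open() */
        int err;

        if (!instance)
            return -1;

        err = hw_open();
        if (err) {
            /* Undo the earlier allocation instead of leaking it, as the
             * previously unchecked call used to. */
            free(instance);
            return err;
        }

        /* ... publish instance on the open list ... */
        free(instance);
        return 0;
    }

    int main(void)
    {
        printf("open: %d\n", timer_open());
        return 0;
    }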
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index c7cb7deafe48..2c316a9bc7f6 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -106,7 +106,6 @@ struct snd_efw {
 	u8 *resp_buf;
 	u8 *pull_ptr;
 	u8 *push_ptr;
-	unsigned int resp_queues;
 };
 
 int snd_efw_transaction_cmd(struct fw_unit *unit,
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df8655fe81..2e1d9a23920c 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
 	unsigned int length, till_end, type;
 	struct snd_efw_transaction *t;
+	u8 *pull_ptr;
 	long count = 0;
 
 	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 	buf += sizeof(type);
 
 	/* write into buffer as many responses as possible */
-	while (efw->resp_queues > 0) {
-		t = (struct snd_efw_transaction *)(efw->pull_ptr);
+	spin_lock_irq(&efw->lock);
+
+	/*
+	 * When another task reaches here during this task's access to user
+	 * space, it picks up current position in buffer and can read the same
+	 * series of responses.
+	 */
+	pull_ptr = efw->pull_ptr;
+
+	while (efw->push_ptr != pull_ptr) {
+		t = (struct snd_efw_transaction *)(pull_ptr);
 		length = be32_to_cpu(t->length) * sizeof(__be32);
 
 		/* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 		/* copy from ring buffer to user buffer */
 		while (length > 0) {
 			till_end = snd_efw_resp_buf_size -
-				(unsigned int)(efw->pull_ptr - efw->resp_buf);
+				(unsigned int)(pull_ptr - efw->resp_buf);
 			till_end = min_t(unsigned int, length, till_end);
 
-			if (copy_to_user(buf, efw->pull_ptr, till_end))
+			spin_unlock_irq(&efw->lock);
+
+			if (copy_to_user(buf, pull_ptr, till_end))
 				return -EFAULT;
 
-			efw->pull_ptr += till_end;
-			if (efw->pull_ptr >= efw->resp_buf +
-					     snd_efw_resp_buf_size)
-				efw->pull_ptr -= snd_efw_resp_buf_size;
+			spin_lock_irq(&efw->lock);
+
+			pull_ptr += till_end;
+			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+				pull_ptr -= snd_efw_resp_buf_size;
 
 			length -= till_end;
 			buf += till_end;
 			count += till_end;
 			remained -= till_end;
 		}
-
-		efw->resp_queues--;
 	}
 
+	/*
+	 * All of tasks can read from the buffer nearly simultaneously, but the
+	 * last position for each task is different depending on the length of
+	 * given buffer. Here, for simplicity, a position of buffer is set by
+	 * the latest task. It's better for a listening application to allow one
+	 * thread to read from the buffer. Unless, each task can read different
+	 * sequence of responses depending on variation of buffer length.
+	 */
+	efw->pull_ptr = pull_ptr;
+
+	spin_unlock_irq(&efw->lock);
+
 	return count;
 }
 
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
 		  loff_t *offset)
 {
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
-	memset(&event, 0, sizeof(event));
+	spin_lock_irq(&efw->lock);
 
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
 	event.lock_status.status = (efw->dev_lock_count > 0);
 	efw->dev_lock_changed = false;
 
+	spin_unlock_irq(&efw->lock);
+
 	count = min_t(long, count, sizeof(event.lock_status));
 
 	if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
 	struct snd_efw *efw = hwdep->private_data;
 	DEFINE_WAIT(wait);
+	bool dev_lock_changed;
+	bool queued;
 
 	spin_lock_irq(&efw->lock);
 
-	while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+	dev_lock_changed = efw->dev_lock_changed;
+	queued = efw->push_ptr != efw->pull_ptr;
+
+	while (!dev_lock_changed && !queued) {
 		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
 		spin_unlock_irq(&efw->lock);
 		schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		spin_lock_irq(&efw->lock);
+		dev_lock_changed = efw->dev_lock_changed;
+		queued = efw->push_ptr != efw->pull_ptr;
 	}
 
-	if (efw->dev_lock_changed)
+	spin_unlock_irq(&efw->lock);
+
+	if (dev_lock_changed)
 		count = hwdep_read_locked(efw, buf, count, offset);
-	else if (efw->resp_queues > 0)
+	else if (queued)
 		count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-	spin_unlock_irq(&efw->lock);
-
 	return count;
 }
 
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
 	poll_wait(file, &efw->hwdep_wait, wait);
 
 	spin_lock_irq(&efw->lock);
-	if (efw->dev_lock_changed || (efw->resp_queues > 0))
+	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
 		events = POLLIN | POLLRDNORM;
 	else
 		events = 0;
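The hwdep rework walks the response ring through a local snapshot of the pull pointer, drops the spinlock around each copy to user space (copy_to_user() may fault and sleep), and publishes the final position once at the end. A pthread sketch of that snapshot-and-drop pattern; it copies byte-at-a-time only to stay short, where the driver copies whole chunks:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 256

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char ring[RING_SIZE];
    static unsigned int push_pos, pull_pos;     /* producer/consumer offsets */

    static size_t ring_read(char *dst, size_t cap)
    {
        unsigned int pos;
        size_t n = 0;

        pthread_mutex_lock(&lock);
        pos = pull_pos;                 /* local snapshot, like pull_ptr */
        while (pos != push_pos && n < cap) {
            char c = ring[pos % RING_SIZE];

            pthread_mutex_unlock(&lock);    /* drop across the copy-out */
            dst[n++] = c;
            pthread_mutex_lock(&lock);
            pos++;
        }
        pull_pos = pos;                 /* publish the final position once */
        pthread_mutex_unlock(&lock);
        return n;
    }

    int main(void)
    {
        char out[16];

        memcpy(ring, "hello", 5);
        push_pos = 5;
        printf("read %zu bytes\n", ring_read(out, sizeof(out)));
        return 0;
    }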
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 0639dcb13f7d..beb0a0ffee57 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
 	else
 		consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
 
-	snd_iprintf(buffer, "%d %d/%d\n",
-		    efw->resp_queues, consumed, snd_efw_resp_buf_size);
+	snd_iprintf(buffer, "%d/%d\n",
+		    consumed, snd_efw_resp_buf_size);
 }
 
 static void
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index f550808d1784..36a08ba51ec7 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
 	size_t capacity, till_end;
 	struct snd_efw_transaction *t;
 
-	spin_lock_irq(&efw->lock);
-
 	t = (struct snd_efw_transaction *)data;
 	length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
+	spin_lock_irq(&efw->lock);
+
 	if (efw->push_ptr < efw->pull_ptr)
 		capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
 	else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
 	}
 
 	/* for hwdep */
-	efw->resp_queues++;
 	wake_up(&efw->hwdep_wait);
 
 	*rcode = RCODE_COMPLETE;
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 131267c3a042..106406cbfaa3 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -16,31 +16,14 @@
 
 #include "tascam.h"
 
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
-			      long count)
-{
-	union snd_firewire_event event;
-
-	memset(&event, 0, sizeof(event));
-
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
-	event.lock_status.status = (tscm->dev_lock_count > 0);
-	tscm->dev_lock_changed = false;
-
-	count = min_t(long, count, sizeof(event.lock_status));
-
-	if (copy_to_user(buf, &event, count))
-		return -EFAULT;
-
-	return count;
-}
-
 static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		       loff_t *offset)
 {
 	struct snd_tscm *tscm = hwdep->private_data;
 	DEFINE_WAIT(wait);
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
 	spin_lock_irq(&tscm->lock);
 
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		spin_lock_irq(&tscm->lock);
 	}
 
-	memset(&event, 0, sizeof(event));
-	count = hwdep_read_locked(tscm, buf, count);
+	event.lock_status.status = (tscm->dev_lock_count > 0);
+	tscm->dev_lock_changed = false;
+
 	spin_unlock_irq(&tscm->lock);
 
+	count = min_t(long, count, sizeof(event.lock_status));
+
+	if (copy_to_user(buf, &event, count))
+		return -EFAULT;
+
 	return count;
 }
 
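Here the memset()-then-assign pair becomes a designated initializer; fields of the initialized member that are not named are zero-filled, so the explicit memset() is redundant. A standalone illustration; union fw_event is a cut-down stand-in for union snd_firewire_event:

    #include <stdio.h>

    union fw_event {
        struct { unsigned int type; unsigned int status; } lock_status;
        unsigned char raw[8];
    };

    int main(void)
    {
        union fw_event event = {
            .lock_status.type = 2,      /* illustrative event type */
        };

        /* status was never assigned, yet reads back as zero. */
        printf("type=%u status=%u\n",
               event.lock_status.type, event.lock_status.status);
        return 0;
    }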
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 12f7f6fdae4d..d4671973d889 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2366,6 +2366,10 @@ static const struct pci_device_id azx_ids[] = {
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
 	{ PCI_DEVICE(0x1002, 0xaae8),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+	{ PCI_DEVICE(0x1002, 0xaae0),
+	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+	{ PCI_DEVICE(0x1002, 0xaaf0),
+	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
 	/* VIA VT8251/VT8237A */
 	{ PCI_DEVICE(0x1106, 0x3288),
 	  .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index f7bcd8dbac14..a8045b8a2a18 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -51,8 +51,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
 #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
 #define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
+#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
-				|| is_skylake(codec) || is_broxton(codec))
+				|| is_skylake(codec) || is_broxton(codec) \
+				|| is_kabylake(codec))
 
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
@@ -3584,6 +3586,7 @@ HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_generic_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f25479ba3981..eaee626ab185 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4840,6 +4840,7 @@ enum {
 	ALC221_FIXUP_HP_FRONT_MIC,
 	ALC292_FIXUP_TPT460,
 	ALC298_FIXUP_SPK_VOLUME,
+	ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5501,6 +5502,15 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 	},
+	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1b, 0x90170151 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5545,6 +5555,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5879,6 +5890,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0x90a60170},
 		{0x14, 0x90170120},
 		{0x21, 0x02211030}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60180},
+		{0x14, 0x90170120},
+		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC256_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index ba8def5665c4..6726143c7fc5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -298,8 +298,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
 	clk_enable(ssc_p->ssc->clk);
 	ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
 
-	/* Reset the SSC to keep it at a clean status */
-	ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+	/* Reset the SSC unless initialized to keep it in a clean state */
+	if (!ssc_p->initialized)
+		ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		dir = 0;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a3e1252ce242..3039e907f1f8 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
 	case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
 	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
 	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 5480e4e424eb..f1d742682317 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -37,12 +37,14 @@
 
 static int target_fd;
 static char target_fname[W_MAX_PATH];
+static unsigned long long filesize;
 
 static int hv_start_fcopy(struct hv_start_fcopy *smsg)
 {
 	int error = HV_E_FAIL;
 	char *q, *p;
 
+	filesize = 0;
 	p = (char *)smsg->path_name;
 	snprintf(target_fname, sizeof(target_fname), "%s/%s",
 		 (char *)smsg->path_name, (char *)smsg->file_name);
@@ -98,14 +100,26 @@ done:
 static int hv_copy_data(struct hv_do_fcopy *cpmsg)
 {
 	ssize_t bytes_written;
+	int ret = 0;
 
 	bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
 			       cpmsg->offset);
 
-	if (bytes_written != cpmsg->size)
-		return HV_E_FAIL;
+	filesize += cpmsg->size;
+	if (bytes_written != cpmsg->size) {
+		switch (errno) {
+		case ENOSPC:
+			ret = HV_ERROR_DISK_FULL;
+			break;
+		default:
+			ret = HV_E_FAIL;
+			break;
+		}
+		syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)",
+		       filesize, (long)bytes_written, strerror(errno));
+	}
 
-	return 0;
+	return ret;
 }
 
 static int hv_copy_finished(void)