mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-30 02:31:46 +00:00)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d499676890d8..a054b5ad410a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -133,14 +133,11 @@ min_adv_mss - INTEGER

IP Fragmentation:

-ipfrag_high_thresh - INTEGER
- Maximum memory used to reassemble IP fragments. When
- ipfrag_high_thresh bytes of memory is allocated for this purpose,
- the fragment handler will toss packets until ipfrag_low_thresh
- is reached. This also serves as a maximum limit to namespaces
- different from the initial one.
-
-ipfrag_low_thresh - INTEGER
+ipfrag_high_thresh - LONG INTEGER
+ Maximum memory used to reassemble IP fragments.
+
+ipfrag_low_thresh - LONG INTEGER
+ (Obsolete since linux-4.17)
Maximum memory used to reassemble IP fragments before the kernel
begins to remove incomplete fragment queues to free up resources.
The kernel still accepts new fragments for defragmentation.
diff --git a/Makefile b/Makefile
index aa458afa7fa2..dd4eaeeb2050 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 70
+SUBLEVEL = 71
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index a8242362e551..ece78630d711 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index ef3c31cd7737..240c9251a7d4 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 1757ac9cecbc..af54b96abee0 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..1d92efb82c37 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;

pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;

@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;

pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;

diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..94a78dbbc91f 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);

irqchip_init();
}
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..57b34257be2b 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}

-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -20,6 +21,7 @@

#include <asm/abi.h>
#include <asm/mips-cps.h>
+#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;

+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}

+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a5893b2cdc0e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 63f007f2de7e..4b95bdde22aa 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -427,8 +427,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
int i;

for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
+ if (!test_bit(i, &npu->mmio_atsd_usage))
+ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ return i;
}

return -ENOSPC;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 4f1f5fc8139d..061906f98dc5 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -170,7 +170,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);

/* copy only the wrapping keys */
- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ if (read_guest_real(vcpu, crycb_addr + 72,
+ vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);

scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 48179928ff38..9d33dbf2489e 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct microcode_amd *mc_amd;
struct ucode_cpu_info *uci;
struct ucode_patch *p;
+ enum ucode_state ret;
u32 rev, dummy;

BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)

/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
- c->microcode = rev;
- uci->cpu_sig.rev = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
}
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);

- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- c->microcode = mc_amd->hdr.patch_id;
+ rev = mc_amd->hdr.patch_id;
+ ret = UCODE_UPDATED;
+
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

- return UCODE_UPDATED;
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
+
+ return ret;
}

static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
+ enum ucode_state ret;
static int prev_rev;
u32 rev;

@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

/*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}

+ ret = UCODE_UPDATED;
+
+out:
uci->cpu_sig.rev = rev;
- c->microcode = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;

- return UCODE_UPDATED;
+ return ret;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4e5a8e30cc4e..fd46d890296c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6965,8 +6965,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return kvm_skip_emulated_instruction(vcpu);
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}

ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c2faff548f59..794c35c4ca73 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;

- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 4b571f3ea009..afbbe5750a1f 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -224,9 +224,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
- bfqg_put(bfqg);
-
blkg_put(bfqg_to_blkg(bfqg));
+
+ bfqg_put(bfqg);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 6714507aa6c7..3d2ab65d2dd1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -416,8 +416,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth <= tags->nr_reserved_tags)
return -EINVAL;

- tdepth -= tags->nr_reserved_tags;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -437,7 +435,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > 16 * BLKDEV_MAX_RQ)
return -EINVAL;

- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+ tags->nr_reserved_tags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -454,7 +453,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* Don't need (or can't) update reserved tags here, they
* remain static and should never need resizing.
*/
- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags,
+ tdepth - tags->nr_reserved_tags);
}

return 0;
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index 007f95eea0e1..903f3ed175d0 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
diff --git a/crypto/Makefile b/crypto/Makefile
index adaf2c63baeb..56282e2d75ad 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -98,7 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
-CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6cb148268676..58e4658f9dd6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -324,6 +324,34 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}

+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ if (vma)
+ alloc->vma_vm_mm = vma->vm_mm;
+ /*
+ * If we see alloc->vma is not NULL, buffer data structures set up
+ * completely. Look at smp_rmb side binder_alloc_get_vma.
+ * We also want to guarantee new alloc->vma_vm_mm is always visible
+ * if alloc->vma is set.
+ */
+ smp_wmb();
+ alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma) {
+ /* Look at description in binder_alloc_set_vma */
+ smp_rmb();
+ vma = alloc->vma;
+ }
+ return vma;
+}
+
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -339,7 +367,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t size, data_offsets_size;
int ret;

- if (alloc->vma == NULL) {
+ if (!binder_alloc_get_vma(alloc)) {
pr_err("%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
@@ -712,9 +740,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- barrier();
- alloc->vma = vma;
- alloc->vma_vm_mm = vma->vm_mm;
+ binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);

return 0;
@@ -741,10 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int buffers, page_count;
struct binder_buffer *buffer;

- BUG_ON(alloc->vma);
-
buffers = 0;
mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->vma);
+
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);

@@ -886,7 +912,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- WRITE_ONCE(alloc->vma, NULL);
+ binder_alloc_set_vma(alloc, NULL);
}

/**
@@ -921,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- vma = alloc->vma;
+ vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index bc562fd2b0a0..cda9a0b5bdaa 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2096,7 +2096,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
- u32 devslp, dm, dito, mdat, deto;
+ u32 devslp, dm, dito, mdat, deto, dito_conf;
int rc;
unsigned int err_mask;

@@ -2120,8 +2120,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
}

- /* device sleep was already enabled */
- if (devslp & PORT_DEVSLP_ADSE)
+ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+ dito = devslp_idle_timeout / (dm + 1);
+ if (dito > 0x3ff)
+ dito = 0x3ff;
+
+ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
+
+ /* device sleep was already enabled and same dito */
+ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
return;

/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
@@ -2129,11 +2136,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
if (rc)
return;

- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
- dito = devslp_idle_timeout / (dm + 1);
- if (dito > 0x3ff)
- dito = 0x3ff;
-
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
@@ -2151,6 +2153,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}

+ /* Make dito, mdat, deto bits to 0s */
+ devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5e55d03d3d01..fe1414df0f33 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1228,6 +1228,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
+ if (!arg || !is_power_of_2(arg) || arg < 512 ||
+ arg > PAGE_SIZE)
+ return -EINVAL;
nbd_size_set(nbd, arg,
div_s64(config->bytesize, arg));
return 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 531a0915066b..11ec92e47455 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME "pktcdvd"
@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}

diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index b33c8d6eb8c7..500d4d632e48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -146,6 +146,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
user the Bluetooth HCI over a serial port interface. The HCI
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 86b526b7d990..a2070ab86c82 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -369,10 +369,13 @@ err_len:
return -EINVAL;
}

-static int tpm_request_locality(struct tpm_chip *chip)
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;

+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
if (!chip->ops->request_locality)
return 0;

@@ -385,10 +388,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
return 0;
}

-static void tpm_relinquish_locality(struct tpm_chip *chip)
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;

+ if (flags & TPM_TRANSMIT_RAW)
+ return;
+
if (!chip->ops->relinquish_locality)
return;

@@ -399,6 +405,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
chip->locality = -1;
}

+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip,
struct tpm_space *space,
u8 *buf, size_t bufsiz,
@@ -449,14 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;

- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
- rc = tpm_request_locality(chip);
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
if (rc < 0)
goto out_no_locality;
}

- if (chip->dev.parent)
- pm_runtime_get_sync(chip->dev.parent);
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+ goto out;

rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -516,13 +545,16 @@ out_recv:
}

rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ if (rc)
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);

out:
- if (chip->dev.parent)
- pm_runtime_put_sync(chip->dev.parent);
+ rc = tpm_go_idle(chip, flags);
+ if (rc)
+ goto out;

if (need_locality)
- tpm_relinquish_locality(chip);
+ tpm_relinquish_locality(chip, flags);

out_no_locality:
if (chip->ops->clk_enable != NULL)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b83b30a3eea5..4bb9b4aa9b49 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;

+/**
+ * enum tpm_transmit_flags
+ *
+ * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls.
+ * @TPM_TRANSMIT_RAW: prevent recursive calls into setup steps
+ * (go idle, locality,..). Always use with UNLOCKED
+ * as it will fail on double locking.
+ */
enum tpm_transmit_flags {
- TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_RAW = BIT(1),
+ TPM_TRANSMIT_UNLOCKED = BIT(0),
+ TPM_TRANSMIT_RAW = BIT(1),
};

ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index d26ea7513226..dabb2ae4e779 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);
}
}

@@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
tpm_buf_append(&tbuf, &buf[*offset], body_size);

rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);

rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip)
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);

tpm2_flush_sessions(chip, space);
}
@@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,

return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, phandle,
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
return rc;

tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);
space->context_tbl[i] = ~0;
}

diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index bb756ad7897e..5c7ce5aaaf6f 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -137,7 +137,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
}

/**
- * crb_go_idle - request tpm crb device to go the idle state
+ * __crb_go_idle - request tpm crb device to go the idle state
*
* @dev: crb device
* @priv: crb private data
@@ -151,7 +151,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*
* Return: 0 always
*/
-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
if ((priv->flags & CRB_FL_ACPI_START) ||
(priv->flags & CRB_FL_CRB_SMC_START))
@@ -166,11 +166,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
dev_warn(dev, "goIdle timed out\n");
return -ETIME;
}
+
return 0;
}

+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
/**
- * crb_cmd_ready - request tpm crb device to enter ready state
+ * __crb_cmd_ready - request tpm crb device to enter ready state
*
* @dev: crb device
* @priv: crb private data
@@ -183,7 +192,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
if ((priv->flags & CRB_FL_ACPI_START) ||
(priv->flags & CRB_FL_CRB_SMC_START))
@@ -201,6 +210,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
return 0;
}

+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
@@ -393,6 +410,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
.request_locality = crb_request_locality,
.relinquish_locality = crb_relinquish_locality,
.req_complete_mask = CRB_DRV_STS_COMPLETE,
@@ -508,7 +527,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv);
if (ret)
return ret;

@@ -553,7 +572,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;

- crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv);

__crb_relinquish_locality(dev, priv, 0);

@@ -624,32 +643,7 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;

- rc = __crb_request_locality(dev, priv, 0);
- if (rc)
- return rc;
-
- rc = crb_cmd_ready(dev, priv);
- if (rc)
- goto out;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = tpm_chip_register(chip);
- if (rc) {
- crb_go_idle(dev, priv);
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- goto out;
- }
-
- pm_runtime_put_sync(dev);
-
-out:
- __crb_relinquish_locality(dev, priv, 0);
-
- return rc;
+ return tpm_chip_register(chip);
}

static int crb_acpi_remove(struct acpi_device *device)
@@ -659,52 +653,11 @@ static int crb_acpi_remove(struct acpi_device *device)

tpm_chip_unregister(chip);

- pm_runtime_disable(dev);
-
return 0;
}

-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_go_idle(dev, priv);
-}
-
-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_cmd_ready(dev, priv);
-}
-
-static int __maybe_unused crb_pm_suspend(struct device *dev)
-{
- int ret;
-
- ret = tpm_pm_suspend(dev);
- if (ret)
- return ret;
-
- return crb_pm_runtime_suspend(dev);
-}
-
-static int __maybe_unused crb_pm_resume(struct device *dev)
-{
- int ret;
-
- ret = crb_pm_runtime_resume(dev);
- if (ret)
- return ret;
-
- return tpm_pm_resume(dev);
-}
-
static const struct dev_pm_ops crb_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};

static const struct acpi_device_id crb_device_ids[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index d5b44cadac56..c619e76ce827 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}

out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,

if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}

- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 8ab0bd8445f6..b00388fc41c8 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -188,6 +188,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;

phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -200,7 +201,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;

- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}

diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e4b40f2b4627..9c0f7cf920af 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}

return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}

return 0;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 4b80e996d976..1022fe8d09c7 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -497,9 +497,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;

err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);

diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index fbaf974277df..1eb857e2f62f 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -728,4 +728,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index be813b2738c1..2e706f1abe64 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8462,6 +8462,7 @@ enum skl_power_gate {
#define TRANS_MSA_10_BPC (2<<5)
#define TRANS_MSA_12_BPC (3<<5)
#define TRANS_MSA_16_BPC (4<<5)
+#define TRANS_MSA_CEA_RANGE (1<<3)

/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5e5fe03b638c..3a4a581345c4 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1396,6 +1396,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));

temp = TRANS_MSA_SYNC_CLK;
+
+ if (crtc_state->limited_color_range)
+ temp |= TRANS_MSA_CEA_RANGE;
+
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 658fa2d3e40c..2c8411b8d050 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;

ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;

if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 8267439dd1ee..d8101cd28dfa 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -196,6 +196,10 @@ int hv_synic_alloc(void)

return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}

@@ -208,12 +212,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);

- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}

kfree(hv_context.hv_numa_map);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 284f8670dbeb..2feae9a421e6 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -859,7 +859,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
else
- bus->get_clk_reg_val = match->data;
+ bus->get_clk_reg_val = (u32 (*)(u32))match->data;

/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ba8df2fde1b2..67cbd9f61acc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,7 @@

#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
+#define SBREG_SMBCTRL_DNV 0xcf000c

/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS BIT(3)
@@ -1395,7 +1396,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);

res = &tco_res[ICH_RES_MEM_OFF];
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ else
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;

diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index ae6ed254e01d..732d6c456a6f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+ unsigned long flags;

/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

+ local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)

xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ local_irq_restore(flags);
+
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 79843a3ca9dc..752dbc388c27 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1459,9 +1459,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
(addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));

- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * Net namespaces must match, and if the listner is listening
+ * on a specific netdevice than netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}

static struct rdma_id_private *cma_find_listener(
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index fc149ea64be7..59aaac43db91 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1647,10 +1647,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
+ /* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
- data->num_touchids = object->num_report_ids
- * mxt_obj_instances(object);
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 195d6e93ac71..5d0ba5f644c4 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -54,7 +54,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;

unsigned int context_id;
- spinlock_t lock; /* Protects mappings */
+ struct mutex mutex; /* Protects mappings */
};

struct ipmmu_vmsa_iommu_priv {
@@ -523,7 +523,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;

- spin_lock_init(&domain->lock);
+ mutex_init(&domain->mutex);

return &domain->io_domain;
}
@@ -548,7 +548,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct ipmmu_vmsa_device *mmu = priv->mmu;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned long flags;
unsigned int i;
int ret = 0;

@@ -557,7 +556,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}

- spin_lock_irqsave(&domain->lock, flags);
+ mutex_lock(&domain->mutex);

if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@@ -574,7 +573,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
} else
dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

- spin_unlock_irqrestore(&domain->lock, flags);
+ mutex_unlock(&domain->mutex);

if (ret < 0)
return ret;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..f6e040fcad9a 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -532,8 +532,9 @@ init_pmu(void)
int timeout;
struct adb_request req;

- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
+ /* Negate TREQ. Set TACK to input and TREQ to output. */
+ out_8(&via[B], in_8(&via[B]) | TREQ);
+ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);

pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@@ -1455,8 +1456,8 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;

- if (via[B] & TREQ) {
- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+ if (in_8(&via[B]) & TREQ) {
+ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
out_8(&via[IFR], SR_INT);
return NULL;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 71c3507df9a0..a4b7c2698096 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2330,7 +2330,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
{0, 2, "Invalid number of cache feature arguments"},
};

- int r;
+ int r, mode_ctr = 0;
unsigned argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2344,14 +2344,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
while (argc--) {
arg = dm_shift_arg(as);

- if (!strcasecmp(arg, "writeback"))
+ if (!strcasecmp(arg, "writeback")) {
cf->io_mode = CM_IO_WRITEBACK;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "writethrough"))
+ else if (!strcasecmp(arg, "writethrough")) {
cf->io_mode = CM_IO_WRITETHROUGH;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "passthrough"))
+ else if (!strcasecmp(arg, "passthrough")) {
cf->io_mode = CM_IO_PASSTHROUGH;
+ mode_ctr++;
+ }

else if (!strcasecmp(arg, "metadata2"))
cf->metadata_version = 2;
@@ -2362,6 +2368,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
}
}

+ if (mode_ctr > 1) {
+ *error = "Duplicate cache io_mode features requested";
+ return -EINVAL;
+ }
+
return 0;
}

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 07ca2fd10189..5018fb2352c2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4516,6 +4516,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}

if (test_bit(R5_InJournal, &dev->flags))
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 2ab8d83e5576..fcfe658a4328 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));

/* 0x81 - 0x94 */
- data[0] = 0x18; /* xtal 24 MHz */
+ if (priv->xtal == SONY_HELENE_XTAL_16000)
+ data[0] = 0x10; /* xtal 16 MHz */
+ else
+ data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 56fe4e5b396e..4a65861433d6 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
return err;
}

+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1250,11 +1258,6 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}

- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();

@@ -1266,7 +1269,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- return err;
+ goto vpif_free;
}

while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1309,7 +1312,10 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err) {
+ goto probe_subdev_out;
+ }
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
@@ -1330,6 +1336,8 @@ probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();

return err;
}
@@ -1351,8 +1359,8 @@ static int vpif_remove(struct platform_device *device)
ch = vpif_obj.dev[i];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
- kfree(vpif_obj.dev[i]);
}
+ free_vpif_objs();

return 0;
}
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
index 64df82817de3..4882ee25bd75 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;

- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
- data_type;
-
if (tg->enabled) {
/* Config Test Generator */
struct v4l2_mbus_framefmt *f =
@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
writel_relaxed(val, csid->base +
CAMSS_CSID_TG_DT_n_CGG_0(0));

+ dt = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
+
/* 5:0 data type */
val = dt;
writel_relaxed(val, csid->base +
@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
val = tg->payload_mode;
writel_relaxed(val, csid->base +
CAMSS_CSID_TG_DT_n_CGG_2(0));
+
+ df = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
} else {
struct csid_phy_config *phy = &csid->phy;

@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)

writel_relaxed(val,
csid->base + CAMSS_CSID_CORE_CTRL_1);
+
+ dt = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
+ df = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
}

/* Config LUT */

dt_shift = (cid % 4) * 8;
- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
- decode_format;

val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val &= ~(0xff << dt_shift);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
|
|
index 8e9531f7f83f..9942932ecbf9 100644
|
|
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
|
|
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
|
|
@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
|
|
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
|
|
{
|
|
struct s5p_mfc_dev *dev = ctx->dev;
|
|
- struct s5p_mfc_buf *dst_buf, *src_buf;
|
|
- size_t dec_y_addr;
|
|
+ struct s5p_mfc_buf *dst_buf, *src_buf;
|
|
+ u32 dec_y_addr;
|
|
unsigned int frame_type;
|
|
|
|
/* Make sure we actually have a new frame before continuing. */
|
|
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
|
|
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
|
|
return;
|
|
- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
|
|
+ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
|
|
|
|
/* Copy timestamp / timecode from decoded src to dst and set
|
|
appropriate flags. */
|
|
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
|
|
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
|
|
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
|
|
- == dec_y_addr) {
|
|
- dst_buf->b->timecode =
|
|
- src_buf->b->timecode;
|
|
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
|
|
+
|
|
+ if (addr == dec_y_addr) {
|
|
+ dst_buf->b->timecode = src_buf->b->timecode;
|
|
dst_buf->b->vb2_buf.timestamp =
|
|
src_buf->b->vb2_buf.timestamp;
|
|
dst_buf->b->flags &=
|
|
@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
|
|
{
|
|
struct s5p_mfc_dev *dev = ctx->dev;
|
|
struct s5p_mfc_buf *dst_buf;
|
|
- size_t dspl_y_addr;
|
|
+ u32 dspl_y_addr;
|
|
unsigned int frame_type;
|
|
|
|
- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
|
|
+ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
|
|
if (IS_MFCV6_PLUS(dev))
|
|
frame_type = s5p_mfc_hw_call(dev->mfc_ops,
|
|
get_disp_frame_type, ctx);
|
|
@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
|
|
/* The MFC returns address of the buffer, now we have to
|
|
* check which videobuf does it correspond to */
|
|
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
|
|
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
|
|
+
|
|
/* Check if this is the buffer we're looking for */
|
|
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
|
|
- == dspl_y_addr) {
|
|
+ if (addr == dspl_y_addr) {
|
|
list_del(&dst_buf->list);
|
|
ctx->dst_queue_cnt--;
|
|
dst_buf->b->sequence = ctx->sequence;
|
|
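The s5p-mfc hunks above switch the address variables from size_t to u32 and truncate both sides of the comparison the same way, because the MFC block only reports the low 32 bits of a buffer's DMA address. A minimal user-space sketch of that matching rule (find_match() and the sample addresses are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Compare a hardware-reported 32-bit address against 64-bit DMA
 * addresses by truncating both sides identically. */
static int find_match(const uint64_t *bufs, int n, uint32_t hw_addr)
{
	int i;

	for (i = 0; i < n; i++)
		if ((uint32_t)bufs[i] == hw_addr)
			return i;
	return -1;
}

int main(void)
{
	uint64_t bufs[2] = { 0x100000000ULL + 0x2000, 0x3000 };
	uint32_t reported = (uint32_t)bufs[0]; /* register holds low 32 bits */

	printf("matched buffer %d\n", find_match(bufs, 2, reported));
	return 0;
}
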
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index b421329b21fa..3d09e1c87921 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2103,14 +2103,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
	}
 };

-static struct dvb_usb_device_properties *p1100;
 static const struct dvb_usb_device_description d1100 = {
	"Prof 1100 USB ",
	{&dw2102_table[PROF_1100], NULL},
	{NULL},
 };

-static struct dvb_usb_device_properties *s660;
 static const struct dvb_usb_device_description d660 = {
	"TeVii S660 USB",
	{&dw2102_table[TEVII_S660], NULL},
@@ -2129,14 +2127,12 @@ static const struct dvb_usb_device_description d480_2 = {
	{NULL},
 };

-static struct dvb_usb_device_properties *p7500;
 static const struct dvb_usb_device_description d7500 = {
	"Prof 7500 USB DVB-S2",
	{&dw2102_table[PROF_7500], NULL},
	{NULL},
 };

-static struct dvb_usb_device_properties *s421;
 static const struct dvb_usb_device_description d421 = {
	"TeVii S421 PCI",
	{&dw2102_table[TEVII_S421], NULL},
@@ -2336,6 +2332,11 @@ static int dw2102_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
 {
	int retval = -ENOMEM;
+	struct dvb_usb_device_properties *p1100;
+	struct dvb_usb_device_properties *s660;
+	struct dvb_usb_device_properties *p7500;
+	struct dvb_usb_device_properties *s421;
+
	p1100 = kmemdup(&s6x0_properties,
			sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
	if (!p1100)
@@ -2404,8 +2405,16 @@ static int dw2102_probe(struct usb_interface *intf,
	    0 == dvb_usb_device_init(intf, &t220_properties,
			 THIS_MODULE, NULL, adapter_nr) ||
	    0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
-			 THIS_MODULE, NULL, adapter_nr))
+			 THIS_MODULE, NULL, adapter_nr)) {
+
+		/* clean up copied properties */
+		kfree(s421);
+		kfree(p7500);
+		kfree(s660);
+		kfree(p1100);
+
		return 0;
+	}

	retval = -ENODEV;
	kfree(s421);
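The dw2102 fix makes the four kmemdup()'d property tables local to probe and frees them on the success path as well, since dvb_usb_device_init() keeps its own copy of whatever it needs. A hedged user-space sketch of that ownership rule (init_copies() stands in for the deep-copying init):

#include <stdlib.h>
#include <string.h>

struct props { int cfg[4]; };

static const struct props template = { { 1, 2, 3, 4 } };

/* Stand-in for an init routine that deep-copies its argument. */
static int init_copies(const struct props *p) { (void)p; return 0; }

static int probe(void)
{
	struct props *dup = malloc(sizeof(*dup));
	int ret;

	if (!dup)
		return -1;
	memcpy(dup, &template, sizeof(*dup));

	ret = init_copies(dup);
	free(dup);	/* freed on success and failure alike */
	return ret;
}

int main(void) { return probe(); }
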
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab47fe48..7dc1cbcd2fb8 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -210,14 +210,13 @@ static int	ti_tscadc_probe(struct platform_device *pdev)
	 * The TSC_ADC_SS controller design assumes the OCP clock is
	 * at least 6x faster than the ADC clock.
	 */
-	clk = clk_get(&pdev->dev, "adc_tsc_fck");
+	clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get TSC fck\n");
		err = PTR_ERR(clk);
		goto err_disable_clk;
	}
	clock_rate = clk_get_rate(clk);
-	clk_put(clk);
	tscadc->clk_div = clock_rate / ADC_CLK;

	/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5c..56efa9d18a9a 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
			goto scif_bind_exit;
		}
	} else {
-		pn = scif_get_new_port();
-		if (!pn) {
-			ret = -ENOSPC;
+		ret = scif_get_new_port();
+		if (ret < 0)
			goto scif_bind_exit;
-		}
+		pn = ret;
	}

	ep->state = SCIFEP_BOUND;
@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
		err = -EISCONN;
		break;
	case SCIFEP_UNBOUND:
-		ep->port.port = scif_get_new_port();
-		if (!ep->port.port) {
-			err = -ENOSPC;
-		} else {
-			ep->port.node = scif_info.nodeid;
-			ep->conn_async_state = ASYNC_CONN_IDLE;
-		}
+		err = scif_get_new_port();
+		if (err < 0)
+			break;
+		ep->port.port = err;
+		ep->port.node = scif_info.nodeid;
+		ep->conn_async_state = ASYNC_CONN_IDLE;
		/* Fall through */
	case SCIFEP_BOUND:
		/*
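Both scif hunks move scif_get_new_port() callers to the usual kernel convention of a single return channel: a non-negative return is the allocated port and a negative return is the errno. A runnable user-space sketch of the convention (get_new_port() is invented for the demo):

#include <errno.h>
#include <stdio.h>

static int next_port = 1024;

/* Returns a port number, or -ENOSPC when the space is exhausted. */
static int get_new_port(void)
{
	if (next_port > 1030)
		return -ENOSPC;
	return next_port++;
}

int main(void)
{
	int ret = get_new_port();

	if (ret < 0) {
		printf("error %d\n", ret);
		return 1;
	}
	printf("bound to port %d\n", ret);	/* ret doubles as the value */
	return 0;
}
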
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index b77aacafc3fc..dda3ed72d05b 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
	err = gpio_request(kim_gdata->nshutdown, "kim");
	if (unlikely(err)) {
		pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
	}

	/* Configure nShutdown GPIO as output=0 */
	err = gpio_direction_output(kim_gdata->nshutdown, 0);
	if (unlikely(err)) {
		pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
	}
	/* get reference of pdev for request_firmware
	 */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 23a6986d512b..a8f74d9bba4f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!e)
+		if (!e) {
+			err = -ENOMEM;
			goto out_free;
+		}

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
@@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!e)
+		if (!e) {
+			err = -ENOMEM;
			goto out_free;
+		}

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0c5b68e7da51..9b3167054843 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -22,7 +22,7 @@
 #include <linux/mdio-mux.h>
 #include <linux/delay.h>

-#define MDIO_PARAM_OFFSET		0x00
+#define MDIO_PARAM_OFFSET		0x23c
 #define MDIO_PARAM_MIIM_CYCLE		29
 #define MDIO_PARAM_INTERNAL_SEL		25
 #define MDIO_PARAM_BUS_ID		22
@@ -30,20 +30,22 @@
 #define MDIO_PARAM_PHY_ID		16
 #define MDIO_PARAM_PHY_DATA		0

-#define MDIO_READ_OFFSET		0x04
+#define MDIO_READ_OFFSET		0x240
 #define MDIO_READ_DATA_MASK		0xffff
-#define MDIO_ADDR_OFFSET		0x08
+#define MDIO_ADDR_OFFSET		0x244

-#define MDIO_CTRL_OFFSET		0x0C
+#define MDIO_CTRL_OFFSET		0x248
 #define MDIO_CTRL_WRITE_OP		0x1
 #define MDIO_CTRL_READ_OP		0x2

-#define MDIO_STAT_OFFSET		0x10
+#define MDIO_STAT_OFFSET		0x24c
 #define MDIO_STAT_DONE			1

 #define BUS_MAX_ADDR			32
 #define EXT_BUS_START_ADDR		16

+#define MDIO_REG_ADDR_SPACE_SIZE	0x250
+
 struct iproc_mdiomux_desc {
	void *mux_handle;
	void __iomem *base;
@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
	md->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res->start & 0xfff) {
+		/* For backward compatibility in case the
+		 * base address is specified with an offset.
+		 */
+		dev_info(&pdev->dev, "fix base address in dt-blob\n");
+		res->start &= ~0xfff;
+		res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+	}
	md->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(md->base)) {
		dev_err(&pdev->dev, "failed to ioremap register\n");
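The iproc MDIO mux now tolerates old device trees whose reg property points into the block at an offset: it masks off the low 12 bits and re-derives the mapping size from MDIO_REG_ADDR_SPACE_SIZE. A small runnable demo of the rebasing arithmetic (the sample address is made up):

#include <stdio.h>
#include <stdint.h>

#define MDIO_REG_ADDR_SPACE_SIZE 0x250

int main(void)
{
	uint64_t start = 0x18003000 + 0x23c;	/* hypothetical dt-blob value */
	uint64_t end;

	if (start & 0xfff)			/* offset smuggled into the base? */
		start &= ~0xfffULL;		/* rebase to the block start */
	end = start + MDIO_REG_ADDR_SPACE_SIZE - 1;
	printf("base=0x%llx end=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
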
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cb17ffadfc30..e0baea2dfd3c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -534,14 +534,6 @@ static void tun_queue_purge(struct tun_file *tfile)
	skb_queue_purge(&tfile->sk.sk_error_queue);
 }

-static void tun_cleanup_tx_array(struct tun_file *tfile)
-{
-	if (tfile->tx_array.ring.queue) {
-		skb_array_cleanup(&tfile->tx_array);
-		memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
-	}
-}
-
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
	struct tun_file *ntfile;
@@ -583,7 +575,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
-		tun_cleanup_tx_array(tfile);
+		skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
 }
@@ -623,13 +615,11 @@ static void tun_detach_all(struct net_device *dev)
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
-		tun_cleanup_tx_array(tfile);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
-		tun_cleanup_tx_array(tfile);
	}
	BUG_ON(tun->numdisabled != 0);

@@ -675,7 +665,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
	}

	if (!tfile->detached &&
-	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+	    skb_array_resize(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}
@@ -2624,6 +2614,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
+	if (skb_array_init(&tfile->tx_array, 0, GFP_KERNEL)) {
+		sk_free(&tfile->sk);
+		return -ENOMEM;
+	}
+
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;
@@ -2644,8 +2639,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

-	memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
-
	return 0;
 }

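The tun change initializes the per-file skb ring to size 0 in tun_chr_open() and only resizes it in tun_attach(), so every teardown path can call skb_array_cleanup() unconditionally instead of probing ring internals. A user-space analog of that lifecycle (the ring_* helpers are invented for the demo):

#include <stdlib.h>

struct ring { void **slots; size_t cap; };

static int ring_init(struct ring *r, size_t cap)
{
	r->slots = cap ? calloc(cap, sizeof(void *)) : NULL;
	r->cap = cap;
	return (cap && !r->slots) ? -1 : 0;
}

static int ring_resize(struct ring *r, size_t cap)
{
	void **n = realloc(r->slots, cap * sizeof(void *));

	if (cap && !n)
		return -1;
	r->slots = n;
	r->cap = cap;
	return 0;
}

static void ring_cleanup(struct ring *r) { free(r->slots); }

int main(void)
{
	struct ring r;

	if (ring_init(&r, 0))		/* open(): ring always initialized */
		return 1;
	if (ring_resize(&r, 128))	/* attach(): size it for real */
		return 1;
	ring_cleanup(&r);		/* detach(): no "was it init'ed?" check */
	return 0;
}
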
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 52ebed1f55a1..6fa9c223ff93 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3074,6 +3074,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
			passive = channel->flags & IEEE80211_CHAN_NO_IR;
			ch->passive = passive;

+			/* the firmware is ignoring the "radar" flag of the
+			 * channel and is scanning actively using Probe Requests
+			 * on "Radar detection"/DFS channels which are not
+			 * marked as "available"
+			 */
+			ch->passive |= ch->chan_radar;
+
			ch->freq = channel->center_freq;
			ch->band_center_freq1 = channel->center_freq;
			ch->min_power = 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7616c1c4bbd3..baec856af90f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1451,6 +1451,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+	cfg->wmi_send_separate = __cpu_to_le32(0);
+	cfg->num_ocb_vdevs = __cpu_to_le32(0);
+	cfg->num_ocb_channels = __cpu_to_le32(0);
+	cfg->num_ocb_schedules = __cpu_to_le32(0);
+	cfg->host_capab = __cpu_to_le32(0);

	ath10k_wmi_put_host_mem_chunks(ar, chunks);

diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 22cf011e839a..e75bba0bbf67 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1228,6 +1228,11 @@ struct wmi_tlv_resource_config {
	__le32 keep_alive_pattern_size;
	__le32 max_tdls_concurrent_sleep_sta;
	__le32 max_tdls_concurrent_buffer_sta;
+	__le32 wmi_send_separate;
+	__le32 num_ocb_vdevs;
+	__le32 num_ocb_channels;
+	__le32 num_ocb_schedules;
+	__le32 host_capab;
 } __packed;

 struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c5c2dd8fa7f..a7f506eb7b36 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	struct ieee80211_channel *channel;
	int chan_pwr, new_pwr;
+	u16 ctl = NO_CTL;

	if (!chan)
		return;

+	if (!test)
+		ctl = ath9k_regd_get_ctl(reg, chan);
+
	channel = chan->chan;
	chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
	new_pwr = min_t(int, chan_pwr, reg->power_limit);

-	ah->eep_ops->set_txpower(ah, chan,
-				 ath9k_regd_get_ctl(reg, chan),
+	ah->eep_ops->set_txpower(ah, chan, ctl,
				 get_antenna_gain(ah, chan), new_pwr, test);
 }

diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d8b041f48ca8..fa64c1cc94ae 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->status.status_driver_data[0];

-	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+	if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+			   IEEE80211_TX_STATUS_EOSP)) {
		ieee80211_tx_status(hw, skb);
		return;
	}
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
 static void wl1271_rx_status(struct wl1271 *wl,
			     struct wl1271_rx_descriptor *desc,
			     struct ieee80211_rx_status *status,
-			     u8 beacon)
+			     u8 beacon, u8 probe_rsp)
 {
	memset(status, 0, sizeof(struct ieee80211_rx_status));

@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
		}
	}

+	if (beacon || probe_rsp)
+		status->boottime_ns = ktime_get_boot_ns();
+
	if (beacon)
		wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
						status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
	if (ieee80211_is_data_present(hdr->frame_control))
		is_data = 1;

-	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+			 ieee80211_is_probe_resp(hdr->frame_control));
	wlcore_hw_set_rx_csum(wl, desc, skb);

	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index af81b2dec42e..620f5b995a12 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -24,6 +24,8 @@
 #include <linux/cdev.h>
 #include <linux/wait.h>

+#include <linux/nospec.h>
+
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
@@ -1173,6 +1175,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
+		p.port = array_index_nospec(p.port,
+					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}
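The switchtec hunk inserts array_index_nospec() so the user-supplied port index cannot index the array speculatively once the bounds check has been mispredicted. The primitive clamps an index without a branch; a freestanding sketch modeled on the kernel's generic mask (simplified; relies on arithmetic right shift of signed long, as the kernel does, and the array size of 48 is only an example):

#include <stdio.h>

/* Yields ~0UL when 0 <= idx < size, 0 otherwise, with no branch, so a
 * mis-speculated load is forced to element 0 rather than attacker-chosen
 * memory. */
static unsigned long index_mask(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long size = 48;	/* e.g. ARRAY_SIZE(dsp_pff_inst_id) + 1 */
	unsigned long idx;

	for (idx = 46; idx <= 49; idx++)
		printf("idx=%lu -> clamped=%lu\n",
		       idx, idx & index_mask(idx, size));
	return 0;
}
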
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 6e472691d8ee..17f2c5a505b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -389,7 +389,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
	const char *name;
	int i, ret;

-	if (group > pctldev->num_groups)
+	if (group >= pctldev->num_groups)
		return;

	seq_printf(s, "\n");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 433af328d981..b78f42abff2f 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -530,7 +530,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
		/* Each status bit covers four pins */
		for (i = 0; i < 4; i++) {
			regval = readl(regs + i);
-			if (!(regval & PIN_IRQ_PENDING))
+			if (!(regval & PIN_IRQ_PENDING) ||
+			    !(regval & BIT(INTERRUPT_MASK_OFF)))
				continue;
			irq = irq_find_mapping(gc->irqdomain, irqnr + i);
			generic_handle_irq(irq);
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index dffa3aab7178..cec4c3223044 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/rpmsg.h>
 #include <linux/of_device.h>
+#include <linux/pm_domain.h>
 #include <linux/slab.h>

 #include "rpmsg_internal.h"
@@ -418,6 +419,10 @@ static int rpmsg_dev_probe(struct device *dev)
	struct rpmsg_endpoint *ept = NULL;
	int err;

+	err = dev_pm_domain_attach(dev, true);
+	if (err)
+		goto out;
+
	if (rpdrv->callback) {
		strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
		chinfo.src = rpdev->src;
@@ -459,6 +464,8 @@ static int rpmsg_dev_remove(struct device *dev)

	rpdrv->remove(rpdev);

+	dev_pm_domain_detach(dev, true);
+
	if (rpdev->ept)
		rpmsg_destroy_ept(rpdev->ept);

diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a1388842e17e..dd342207095a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2042,6 +2042,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -2064,6 +2065,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+		retval = -ENOMEM;
		goto out_release_mem_region;
	}

@@ -2071,8 +2073,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
-	if (twa_reset_sequence(tw_dev, 0))
+	if (twa_reset_sequence(tw_dev, 0)) {
+		retval = -ENOMEM;
		goto out_iounmap;
+	}

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..aa317d6909e8 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1597,6 +1597,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (twl_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -1611,6 +1612,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+		retval = -ENOMEM;
		goto out_release_mem_region;
	}

@@ -1620,6 +1622,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	/* Initialize the card */
	if (twl_reset_sequence(tw_dev, 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+		retval = -ENOMEM;
		goto out_iounmap;
	}

diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f6179e3d6953..961ea6f7def8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (tw_initialize_device_extension(tw_dev)) {
		printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = pci_resource_start(pdev, 0);
	if (!tw_dev->base_addr) {
		printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+		retval = -ENOMEM;
		goto out_release_mem_region;
	}

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8eb3f96fe068..bc61cc8bc6f0 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -676,7 +676,7 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
 #define LS_MDS_LINK_DOWN      0x8	/* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK      0x16	/* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK      0x10	/* MDS Diagnostics Link Up (Loopback) */

	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e6d51135d105..0d0be7d8b9d6 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -317,6 +317,7 @@ void __transport_register_session(
 {
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
+	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -353,7 +354,7 @@ void __transport_register_session(
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

-		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
@@ -362,7 +363,7 @@ void __transport_register_session(

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
-		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 20d79a6007d5..070733ca94d5 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1894,7 +1894,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
	ByteIO_t UPCIRingInd = 0;

	if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
-	    pci_enable_device(dev))
+	    pci_enable_device(dev) || i >= NUM_BOARDS)
		return 0;

	rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ff04b7f8549f..41784798c789 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner,
	if (ret)
		goto err_uio_dev_add_attributes;

-	info->uio_dev = idev;
-
	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		/*
		 * Note that we deliberately don't use devm_request_irq
@@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner,
			goto err_request_irq;
	}

+	info->uio_dev = idev;
	return 0;

 err_request_irq:
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 4737615f0eaa..ce696d6c4641 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/completion.h>
 #include <asm/current.h>
+#include <linux/magic.h>

 /* This is the range of ioctl() numbers we claim as ours */
 #define AUTOFS_IOC_FIRST     AUTOFS_IOC_READY
@@ -124,7 +125,8 @@ struct autofs_sb_info {

 static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
 {
-	return (struct autofs_sb_info *)(sb->s_fs_info);
+	return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+		NULL : (struct autofs_sb_info *)(sb->s_fs_info);
 }

 static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
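autofs4_sbi() now refuses to reinterpret sb->s_fs_info unless the superblock's magic identifies the filesystem, hardening ioctl paths that can be pointed at an arbitrary open file. A runnable user-space sketch of the same defensive cast (structs and magic value are invented):

#include <stdio.h>
#include <stddef.h>

#define DEMO_MAGIC 0x0187

struct super { unsigned long s_magic; void *s_fs_info; };
struct fs_private { int mount_id; };

static struct fs_private *demo_sbi(struct super *sb)
{
	/* only trust s_fs_info when the magic proves who owns it */
	return sb->s_magic != DEMO_MAGIC ? NULL : sb->s_fs_info;
}

int main(void)
{
	struct fs_private priv = { 42 };
	struct super ours = { DEMO_MAGIC, &priv };
	struct super theirs = { 0x9123, &priv };

	printf("ours: %p, theirs: %p\n",
	       (void *)demo_sbi(&ours), (void *)demo_sbi(&theirs));
	return 0;
}
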
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 09e7d68dff02..3c7e727612fa 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 #include <linux/parser.h>
 #include <linux/bitops.h>
-#include <linux/magic.h>
 #include "autofs_i.h"
 #include <linux/module.h>

diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7303ba108112..a507c0d25354 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3158,6 +3158,25 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,

		same_lock_start = min_t(u64, loff, dst_loff);
		same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+	} else {
+		/*
+		 * If the source and destination inodes are different, the
+		 * source's range end offset matches the source's i_size, that
+		 * i_size is not a multiple of the sector size, and the
+		 * destination range does not go past the destination's i_size,
+		 * we must round down the length to the nearest sector size
+		 * multiple. If we don't do this adjustment we end replacing
+		 * with zeroes the bytes in the range that starts at the
+		 * deduplication range's end offset and ends at the next sector
+		 * size multiple.
+		 */
+		if (loff + olen == i_size_read(src) &&
+		    dst_loff + len < i_size_read(dst)) {
+			const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+			len = round_down(i_size_read(src), sz) - loff;
+			olen = len;
+		}
	}

	/* don't make the dst file partly checksummed */
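The btrfs dedupe hunk rounds the requested length down to a sector multiple when the source range ends exactly at an unaligned i_size, so the tail bytes are not clobbered with zeroes. The arithmetic is the standard power-of-two round_down; a tiny runnable example with made-up sizes:

#include <stdio.h>
#include <stdint.h>

/* y must be a power of two */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t i_size = 10000;	/* source i_size, not sector aligned */
	uint64_t sectorsize = 4096;
	uint64_t loff = 0;		/* dedupe range start */

	uint64_t len = round_down(i_size, sectorsize) - loff;	/* 8192 */
	printf("dedupe %llu bytes instead of %llu\n",
	       (unsigned long long)len, (unsigned long long)(i_size - loff));
	return 0;
}
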
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index caf9cf91b825..2cd0b3053439 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_READ;
	oparms.create_options = CREATE_NOT_DIR;
+	if (backup_cred(cifs_sb))
+		oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e9f246fe9d80..759cbbf7b1af 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -385,7 +385,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
-	oparms.create_options = 0;
+	if (backup_cred(cifs_sb))
+		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+	else
+		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

@@ -534,7 +537,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_EA;
	oparms.disposition = FILE_OPEN;
-	oparms.create_options = 0;
+	if (backup_cred(cifs_sb))
+		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+	else
+		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

@@ -613,7 +619,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
-	oparms.create_options = 0;
+	if (backup_cred(cifs_sb))
+		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+	else
+		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

@@ -1215,7 +1224,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
-	oparms.create_options = 0;
+	if (backup_cred(cifs_sb))
+		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+	else
+		oparms.create_options = 0;
	oparms.fid = fid;
	oparms.reconnect = false;

@@ -1491,7 +1503,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
-	oparms.create_options = 0;
+	if (backup_cred(cifs_sb))
+		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+	else
+		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

@@ -3200,7 +3215,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3220,7 +3235,7 @@ struct smb_version_values smb3any_values = {
 struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3240,7 +3255,7 @@ struct smb_version_values smbdefault_values = {
 struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3260,7 +3275,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3281,7 +3296,7 @@ struct smb_version_values smb302_values = {
 struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 58842b36481d..078ec705a5cc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1816,6 +1816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
+	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+		 (oparms->create_options & CREATE_NOT_FILE))
+		req->RequestedOplockLevel = *oplock; /* no srv lease support */
	else {
		rc = add_lease_context(server, iov, &n_iov, oplock);
		if (rc) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3b34004a71c1..54f8520ad7a2 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1766,8 +1766,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
					pgoff_t index, bool for_write)
 {
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-	struct page *page = find_lock_page(mapping, index);
+	struct page *page;

+	if (!for_write)
+		page = find_get_page_flags(mapping, index,
+						FGP_LOCK | FGP_ACCESSED);
+	else
+		page = find_lock_page(mapping, index);
	if (page)
		return page;

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 87e654c53c31..6f589730782d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1803,7 +1803,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
-	int ret;
+	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index f2f897cd23c9..f22884418e92 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -958,7 +958,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
			goto next;

		sum = page_address(sum_page);
-		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+		if (type != GET_SUM_TYPE((&sum->footer))) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+				"type [%d, %d] in SSA and SIT",
+				segno, type, GET_SUM_TYPE((&sum->footer)));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			goto next;
+		}

		/*
		 * this is to avoid deadlock:
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 8322e4e7bb3f..888a9dc13677 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -128,6 +128,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
	if (err)
		return err;

+	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+		f2fs_put_dnode(dn);
+		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+		f2fs_msg(fio.sbi->sb, KERN_WARNING,
+			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+			"run fsck to fix.",
+			__func__, dn->inode->i_ino, dn->data_blkaddr);
+		return -EINVAL;
+	}
+
	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	read_inline_data(page, dn->inode_page);
@@ -365,6 +375,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
	if (err)
		goto out;

+	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+		f2fs_put_dnode(&dn);
+		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+		f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+			"run fsck to fix.",
+			__func__, dir->i_ino, dn.data_blkaddr);
+		err = -EINVAL;
+		goto out;
+	}
+
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);

@@ -481,6 +502,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
	return 0;
 recover:
	lock_page(ipage);
+	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f623da26159f..712505ec5de4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1610,7 +1610,9 @@ next_step:
						!is_cold_node(page)))
				continue;
 lock_node:
-			if (!trylock_page(page))
+			if (wbc->sync_mode == WB_SYNC_ALL)
+				lock_page(page);
+			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 39ada30889b6..4dfb5080098f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -414,6 +414,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

+		if (IS_CURSEC(sbi, secno))
+			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
@@ -421,6 +423,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
			free_i->free_sections++;
		}
	}
+skip_free:
	spin_unlock(&free_i->segmap_lock);
 }

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 400c00058bad..eae35909fa51 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1883,12 +1883,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
+	unsigned int sit_segs, nat_segs;
+	unsigned int sit_bitmap_size, nat_bitmap_size;
+	unsigned int log_blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
-	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
-	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+	fsmeta += sit_segs;
+	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

@@ -1919,6 +1924,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
		return 1;
	}

+	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+
+	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong bitmap size: sit: %u, nat:%u",
+			sit_bitmap_size, nat_bitmap_size);
+		return 1;
+	}
+
	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
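The added f2fs check recomputes what the SIT/NAT version bitmaps must occupy from the on-disk geometry: half of the SIT/NAT segments hold the working copy (the other half are the paired shadow copies), each segment spans 1 << log_blocks_per_seg blocks, and the bitmap keeps one bit per block. A runnable recomputation with example values (not taken from a real image):

#include <stdio.h>

int main(void)
{
	unsigned int sit_segs = 2;		/* segment_count_sit */
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */

	/* half the segments are the working copy; one bit per block */
	unsigned int expect = ((sit_segs / 2) << log_blocks_per_seg) / 8;

	printf("expected sit_ver_bitmap_bytesize = %u\n", expect);	/* 64 */
	return 0;
}
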
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index e2c258f717cd..93af9d7dfcdc 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
+#include <linux/compiler.h>
 #include <linux/proc_fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/seq_file.h>
@@ -381,7 +382,8 @@ static struct kobject f2fs_feat = {
	.kset	= &f2fs_kset,
 };

-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+						void *offset)
 {
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -408,7 +410,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
	return 0;
 }

-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+						void *offset)
 {
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -432,7 +435,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
	return 0;
 }

-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+					       void *offset)
 {
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2c3f398995f6..b8d55da2f04d 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -213,9 +213,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
 {
	u32 oldseq, newseq;

-	/* Is the stateid still not initialised? */
+	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
-		return NFS4ERR_DELAY;
+		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 123c069429a7..57de914630bc 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -904,16 +904,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)

	if (hdr_arg.minorversion == 0) {
		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
-		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+			if (cps.clp)
+				nfs_put_client(cps.clp);
			goto out_invalidcred;
+		}
	}

	cps.minorversion = hdr_arg.minorversion;
	hdr_res.taglen = hdr_arg.taglen;
	hdr_res.tag = hdr_arg.tag;
-	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+		if (cps.clp)
+			nfs_put_client(cps.clp);
		return rpc_system_err;
-
+	}
	while (status == 0 && nops != hdr_arg.nops) {
		status = process_op(nops, rqstp, &xdr_in,
				    rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9f0bb908e2b5..e41ef532c4ce 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -354,7 +354,7 @@ struct kioctx_table;
 struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
-	u32 vmacache_seqnum;                   /* per-thread vmacache */
+	u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)

 struct vmacache {
-	u32 seqnum;
+	u64 seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
 };

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 7fd514f36e74..a4be6388a980 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -152,25 +152,25 @@ struct rhashtable_params {
 /**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
- * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
- * @p: Configuration parameters
 * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
 */
 struct rhashtable {
	struct bucket_table __rcu	*tbl;
-	atomic_t			nelems;
	unsigned int			key_len;
-	struct rhashtable_params	p;
	unsigned int			max_elems;
+	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex                    mutex;
	spinlock_t			lock;
+	atomic_t			nelems;
 };

 /**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6dd77767fd5b..f64e88444082 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -663,21 +663,26 @@ struct sk_buff {
			struct sk_buff		*prev;

			union {
-				ktime_t		tstamp;
-				u64		skb_mstamp;
+				struct net_device	*dev;
+				/* Some protocols might use this space to store information,
+				 * while device pointer would be NULL.
+				 * UDP receive path is one user.
+				 */
+				unsigned long		dev_scratch;
			};
		};
-		struct rb_node	rbnode; /* used in netem & tcp stack */
+		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
+		struct list_head	list;
	};
-	struct sock		*sk;

	union {
-		struct net_device	*dev;
-		/* Some protocols might use this space to store information,
-		 * while device pointer would be NULL.
-		 * UDP receive path is one user.
-		 */
-		unsigned long		dev_scratch;
+		struct sock		*sk;
+		int			ip_defrag_offset;
+	};
+
+	union {
+		ktime_t		tstamp;
+		u64		skb_mstamp;
	};
	/*
	 * This is the control buffer. It is free to use for every
@@ -2580,7 +2585,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
		kfree_skb(skb);
 }

-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);

 void *netdev_alloc_frag(unsigned int fragsz);

@@ -3134,6 +3139,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
	return skb->data;
 }

+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
 /**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
@@ -3147,9 +3153,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
	if (likely(len >= skb->len))
		return 0;
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
-	return __pskb_trim(skb, len);
+	return pskb_trim_rcsum_slow(skb, len);
 }

 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)

@@ -3169,6 +3173,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)

 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)

+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
@@ -3183,6 +3193,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

+#define skb_rbtree_walk(skb, root)						\
+		for (skb = skb_rb_first(root); skb != NULL;			\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb)						\
+		for (; skb != NULL;						\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp)					\
+		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
+		     skb = tmp)
+
 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 2a6c3d96b31f..7f7b29f86c59 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -48,6 +48,8 @@ struct tpm_class_ops {
	u8 (*status) (struct tpm_chip *chip);
	bool (*update_timeouts)(struct tpm_chip *chip,
				unsigned long *timeout_cap);
+	int (*go_idle)(struct tpm_chip *chip);
+	int (*cmd_ready)(struct tpm_chip *chip);
	int (*request_locality)(struct tpm_chip *chip, int loc);
	int (*relinquish_locality)(struct tpm_chip *chip, int loc);
	void (*clk_enable)(struct tpm_chip *chip, bool value);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
		VMACACHE_FIND_CALLS,
		VMACACHE_FIND_HITS,
-		VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SWAP
		SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index a5b3aa8d281f..a09b28f76460 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
	memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }

-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
						    unsigned long addr);
@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }

 #endif /* __LINUX_VMACACHE_H */
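Widening vmacache_seqnum to u64 lets the overflow path (and the vmacache_flush_all() walk across every task) be deleted outright: even at an implausible billion invalidations per second a 64-bit counter takes centuries to wrap, where a 32-bit one wraps in seconds. A back-of-the-envelope check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	double per_sec = 1e9;	/* very generous invalidation rate */

	printf("u32 wraps after %.1f s\n",
	       (double)UINT32_MAX / per_sec);			/* ~4.3 s */
	printf("u64 wraps after %.0f years\n",
	       (double)UINT64_MAX / per_sec / (3600.0 * 24 * 365)); /* ~585 */
	return 0;
}
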
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
|
|
index a6e4edd8d4a2..335cf7851f12 100644
|
|
--- a/include/net/inet_frag.h
|
|
+++ b/include/net/inet_frag.h
|
|
@@ -2,14 +2,20 @@
|
|
#ifndef __NET_FRAG_H__
|
|
#define __NET_FRAG_H__
|
|
|
|
+#include <linux/rhashtable.h>
|
|
+
|
|
struct netns_frags {
|
|
- /* Keep atomic mem on separate cachelines in structs that include it */
|
|
- atomic_t mem ____cacheline_aligned_in_smp;
|
|
/* sysctls */
|
|
+ long high_thresh;
|
|
+ long low_thresh;
|
|
int timeout;
|
|
- int high_thresh;
|
|
- int low_thresh;
|
|
int max_dist;
|
|
+ struct inet_frags *f;
|
|
+
|
|
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
|
|
+
|
|
+ /* Keep atomic mem on separate cachelines in structs that include it */
|
|
+ atomic_long_t mem ____cacheline_aligned_in_smp;
|
|
};
|
|
|
|
/**
|
|
@@ -25,130 +31,115 @@ enum {
|
|
INET_FRAG_COMPLETE = BIT(2),
|
|
};
|
|
|
|
+struct frag_v4_compare_key {
|
|
+ __be32 saddr;
|
|
+ __be32 daddr;
|
|
+ u32 user;
|
|
+ u32 vif;
|
|
+ __be16 id;
|
|
+ u16 protocol;
|
|
+};
|
|
+
|
|
+struct frag_v6_compare_key {
|
|
+ struct in6_addr saddr;
|
|
+ struct in6_addr daddr;
|
|
+ u32 user;
|
|
+ __be32 id;
|
|
+ u32 iif;
|
|
+};
|
|
+
|
|
/**
|
|
* struct inet_frag_queue - fragment queue
|
|
*
|
|
- * @lock: spinlock protecting the queue
|
|
+ * @node: rhash node
|
|
+ * @key: keys identifying this frag.
|
|
* @timer: queue expiration timer
|
|
- * @list: hash bucket list
|
|
+ * @lock: spinlock protecting this frag
|
|
* @refcnt: reference count of the queue
|
|
* @fragments: received fragments head
|
|
+ * @rb_fragments: received fragments rb-tree root
|
|
* @fragments_tail: received fragments tail
|
|
+ * @last_run_head: the head of the last "run". see ip_fragment.c
|
|
* @stamp: timestamp of the last received fragment
|
|
* @len: total length of the original datagram
|
|
* @meat: length of received fragments so far
|
|
* @flags: fragment queue flags
|
|
* @max_size: maximum received fragment size
|
|
* @net: namespace that this frag belongs to
|
|
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for freeing deferral
 */
struct inet_frag_queue {
- spinlock_t lock;
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
struct timer_list timer;
- struct hlist_node list;
+ spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
__u8 flags;
u16 max_size;
- struct netns_frags *net;
- struct hlist_node list_evictor;
-};
-
-#define INETFRAGS_HASHSZ 1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- * struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH 128
-
-struct inet_frag_bucket {
- struct hlist_head chain;
- spinlock_t chain_lock;
+ struct netns_frags *net;
+ struct rcu_head rcu;
};

struct inet_frags {
- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-
- struct work_struct frags_work;
- unsigned int next_bucket;
- unsigned long last_rebuild_jiffies;
- bool rebuild;
-
- /* The first call to hashfn is responsible to initialize
- * rnd. This is best done with net_get_random_once.
- *
- * rnd_seqlock is used to let hash insertion detect
- * when it needs to re-lookup the hash chain to use.
- */
- u32 rnd;
- seqlock_t rnd_seqlock;
unsigned int qsize;

- unsigned int (*hashfn)(const struct inet_frag_queue *);
- bool (*match)(const struct inet_frag_queue *q,
- const void *arg);
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
- void (*frag_expire)(unsigned long data);
+ void (*frag_expire)(struct timer_list *t);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
{
- atomic_set(&nf->mem, 0);
+ atomic_long_set(&nf->mem, 0);
+ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
}
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);

-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);

-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);

-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
- return !hlist_unhashed(&q->list_evictor);
+ inet_frag_destroy(q);
}

/* Memory Tracking Functions. */

-static inline int frag_mem_limit(struct netns_frags *nf)
-{
- return atomic_read(&nf->mem);
-}
-
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline long frag_mem_limit(const struct netns_frags *nf)
{
- atomic_sub(i, &nf->mem);
+ return atomic_long_read(&nf->mem);
}

-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
- atomic_add(i, &nf->mem);
+ atomic_long_sub(val, &nf->mem);
}

-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
- return atomic_read(&nf->mem);
+ atomic_long_add(val, &nf->mem);
}
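
The three helpers above are the whole memory-tracking surface after this patch: one atomic_long_t per namespace, read and adjusted without per-hash-bucket locking, and wide enough for budgets above 2 GB on 64-bit hosts. A minimal user-space sketch of the same accounting pattern follows; the frag_budget type and all names are invented for illustration, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for netns_frags: one long-sized counter. */
struct frag_budget {
	atomic_long mem;
	long high_thresh;
};

static long budget_used(struct frag_budget *b)
{
	return atomic_load(&b->mem);
}

/* Charge a new fragment; refuse it once the high threshold is crossed. */
static int budget_charge(struct frag_budget *b, long truesize)
{
	if (budget_used(b) + truesize > b->high_thresh)
		return -1;               /* caller drops the fragment */
	atomic_fetch_add(&b->mem, truesize);
	return 0;
}

static void budget_release(struct frag_budget *b, long truesize)
{
	atomic_fetch_sub(&b->mem, truesize);
}

int main(void)
{
	struct frag_budget b = { .high_thresh = 4 * 1024 * 1024 };

	atomic_init(&b.mem, 0);
	budget_charge(&b, 2048);
	printf("used: %ld\n", budget_used(&b));
	budget_release(&b, 2048);
	return 0;
}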

/* RFC 3168 support :
diff --git a/include/net/ip.h b/include/net/ip.h
index 81da1123fc8e..7c430343176a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -570,7 +570,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
return skb;
}
#endif
-int ip_frag_mem(struct net *net);

/*
* Functions provided by ip_forward.c
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f280c61e019a..fa87a62e9bd3 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -331,13 +331,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
idev->cnf.accept_ra;
}

-#if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv6.frags);
-}
-#endif
-
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
@@ -531,17 +524,8 @@ enum ip6_defrag_users {
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

-struct ip6_create_arg {
- __be32 id;
- u32 user;
- const struct in6_addr *src;
- const struct in6_addr *dst;
- int iif;
- u8 ecn;
-};
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
+extern const struct rhashtable_params ip6_rhash_params;

/*
* Equivalent of ipv4 struct ip
@@ -549,19 +533,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
struct frag_queue {
struct inet_frag_queue q;

- __be32 id; /* fragment id */
- u32 user;
- struct in6_addr saddr;
- struct in6_addr daddr;
-
int iif;
unsigned int csum;
__u16 nhoffset;
u8 ecn;
};

-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags);
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559314e7..9eae13eefc49 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -898,13 +898,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}

static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}

/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 0d941cdd8e8c..f5d753e60836 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -56,6 +56,7 @@ enum
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8f02f9b6e046..f3f389e33343 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -612,15 +612,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
bool bringup = st->bringup;
enum cpuhp_state state;

+ if (WARN_ON_ONCE(!st->should_run))
+ return;
+
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();

- if (WARN_ON_ONCE(!st->should_run))
- return;
-
cpuhp_lock_acquire(bringup);

if (st->single) {
@@ -932,7 +932,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
if (ret) {
st->target = prev_state;
- undo_cpu_down(cpu, st);
+ if (st->state < prev_state)
+ undo_cpu_down(cpu, st);
break;
}
}
@@ -985,7 +986,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 9fe525f410bf..f17c76a1a05f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1609,6 +1609,22 @@ static inline void __run_timers(struct timer_base *base)

raw_spin_lock_irq(&base->lock);

+ /*
+ * timer_base::must_forward_clk must be cleared before running
+ * timers so that any timer functions that call mod_timer() will
+ * not try to forward the base. Idle tracking / clock forwarding
+ * logic is only used with BASE_STD timers.
+ *
+ * The must_forward_clk flag is cleared unconditionally also for
+ * the deferrable base. The deferrable base is not affected by idle
+ * tracking and never forwarded, so clearing the flag is a NOOP.
+ *
+ * The fact that the deferrable base is never forwarded can cause
+ * large variations in granularity for deferrable timers, but they
+ * can be deferred for long periods due to idle anyway.
+ */
+ base->must_forward_clk = false;
+
while (time_after_eq(jiffies, base->clk)) {

levels = collect_expired_timers(base, heads);
@@ -1628,19 +1644,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
- * base. idle trcking / clock forwarding logic is only used with
- * BASE_STD timers.
- *
- * The deferrable base does not do idle tracking at all, so we do
- * not forward it. This can result in very large variations in
- * granularity for deferrable timers, but they can be deferred for
- * long periods due to idle.
- */
- base->must_forward_clk = false;
-
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 39215c724fc7..cebbcec877d7 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -364,6 +364,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
err = rhashtable_rehash_chain(ht, old_hash);
if (err)
return err;
+ cond_resched();
}

/* Publish the new table pointer. */
@@ -1073,6 +1074,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;

+ cond_resched();
for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
diff --git a/mm/debug.c b/mm/debug.c
index 6726bec731c9..c55abc893fdc 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %p\n"
#endif
@@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",

- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
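
mm->vmacache_seqnum is widened to 64 bits here, so dump_mm() switches to %llu with an explicit cast, the portable idiom when the underlying 64-bit type may be typedef'ed differently per architecture. A user-space equivalent of the same idiom (the variable name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t seqnum = 1ULL << 40;

	/* Cast for %llu, exactly as the patch does ... */
	printf("seqnum %llu\n", (unsigned long long)seqnum);
	/* ... or use the inttypes.h macro, the user-space alternative. */
	printf("seqnum %" PRIu64 "\n", seqnum);
	return 0;
}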
diff --git a/mm/vmacache.c b/mm/vmacache.c
index db7596eb6132..f1729617dc85 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -7,44 +7,6 @@
#include <linux/mm.h>
#include <linux/vmacache.h>

-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cef3754408d4..b21fcc838784 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;

- strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->name, req->name, sizeof(hid->name));

snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
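
The corrected strncpy() above is bounded by the destination, not the source; sizing the copy by sizeof(req->name) was only safe while the two arrays happened to match. A small standalone demonstration of the destination-bounded idiom, with explicit NUL-termination since strncpy() does not terminate on truncation (struct and field names are invented):

#include <stdio.h>
#include <string.h>

struct hid_like { char name[8]; };
struct req_like { char name[16]; };

int main(void)
{
	struct req_like req = { "much-too-long" };
	struct hid_like hid;

	/* Bound by the destination buffer, then terminate explicitly. */
	strncpy(hid.name, req.name, sizeof(hid.name) - 1);
	hid.name[sizeof(hid.name) - 1] = '\0';
	printf("%s\n", hid.name);   /* prints "much-to" */
	return 0;
}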
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2e5eeba97de9..168a3e8883d4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1839,6 +1839,20 @@ done:
}
EXPORT_SYMBOL(___pskb_trim);

+/* Note : use pskb_trim_rcsum() instead of calling this directly
+ */
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ int delta = skb->len - len;
+
+ skb->csum = csum_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0));
+ }
+ return __pskb_trim(skb, len);
+}
+EXPORT_SYMBOL(pskb_trim_rcsum_slow);
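
pskb_trim_rcsum_slow() keeps CHECKSUM_COMPLETE valid without re-summing the whole packet: the Internet checksum is a one's-complement sum, so the checksum of the trimmed tail can simply be subtracted from the stored total. A self-contained demonstration of that arithmetic; the helper names here are invented for the example, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* RFC 1071 style one's-complement sum, folded to 16 bits. */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement subtraction: add the complement, then fold once. */
static uint16_t csum16_sub(uint16_t sum, uint16_t part)
{
	uint32_t res = (uint32_t)sum + (uint16_t)~part;

	return (uint16_t)((res & 0xffff) + (res >> 16));
}

int main(void)
{
	const uint8_t pkt[8] = { 0x12, 0x34, 0x56, 0x78,
				 0x9a, 0xbc, 0xde, 0xf0 };

	/* Trimming the 4-byte tail: subtract its sum from the total
	 * instead of re-summing the remaining bytes.
	 */
	printf("head sum, direct:     0x%04x\n", csum16(pkt, 4));
	printf("whole minus tail sum: 0x%04x\n",
	       csum16_sub(csum16(pkt, 8), csum16(pkt + 4, 4)));
	return 0;
}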
+
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
@@ -2842,20 +2856,27 @@ EXPORT_SYMBOL(skb_queue_purge);
/**
* skb_rbtree_purge - empty a skb rbtree
* @root: root of the rbtree to empty
+ * Return value: the sum of truesizes of all purged skbs.
*
* Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
* the list and one reference dropped. This function does not take
* any lock. Synchronization should be handled by the caller (e.g., TCP
* out-of-order queue is protected by the socket lock).
*/
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
{
- struct sk_buff *skb, *next;
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;

- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
- kfree_skb(skb);
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

- *root = RB_ROOT;
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ sum += skb->truesize;
+ kfree_skb(skb);
+ }
+ return sum;
}
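
skb_rbtree_purge() now also reports how much memory it released, so callers such as ip_frag_reinit() can debit their fragment budget with a single subtraction instead of per-buffer bookkeeping. A toy user-space rendering of that free-and-sum contract (the buf type is invented):

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t truesize; void *data; };

/* Free every buffer and report the total accounted size, so the
 * caller can subtract it from a memory budget in one operation.
 */
static size_t purge(struct buf **bufs, int n)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += bufs[i]->truesize;
		free(bufs[i]->data);
		free(bufs[i]);
		bufs[i] = NULL;
	}
	return sum;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->truesize = 2048;
	b->data = malloc(64);
	printf("released %zu bytes\n", purge(&b, 1));
	return 0;
}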

/**
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index bae7d78aa068..fbeacbc2be5d 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1765,7 +1765,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
- (!prio || itr->app.priority == prio))
+ ((prio == -1) || itr->app.priority == prio))
return itr;
}

@@ -1800,7 +1800,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;

spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);

@@ -1828,7 +1829,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)

spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ itr = dcb_app_lookup(new, dev->ifindex, -1);
+ if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -1861,7 +1863,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;

spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);
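
The dcbnl change replaces the !prio wildcard test with an explicit -1 sentinel: 0 is itself a valid 802.1p priority, so it cannot double as "match any priority". A compact standalone illustration of the difference (types and values invented for the example):

#include <stdio.h>
#include <stddef.h>

#define PRIO_ANY (-1)

struct app { int protocol; int priority; };

/* Match an entry; prio == PRIO_ANY means "any priority".
 * Using 0 as the wildcard was the bug: 0 is a valid priority too.
 */
static const struct app *lookup(const struct app *tbl, int n,
				int protocol, int prio)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].protocol == protocol &&
		    (prio == PRIO_ANY || tbl[i].priority == prio))
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const struct app tbl[] = { { 0x8906, 0 } };  /* priority-0 entry */
	const struct app *hit;

	hit = lookup(tbl, 1, 0x8906, PRIO_ANY);
	printf("wildcard: %s\n", hit ? "match" : "no match");
	hit = lookup(tbl, 1, 0x8906, 3);
	printf("prio 3:   %s\n", hit ? "match" : "no match");
	return 0;
}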

diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index d8de3bcfb103..b8d95cb71c25 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -17,37 +17,19 @@ typedef unsigned __bitwise lowpan_rx_result;
#define LOWPAN_DISPATCH_FRAG1 0xc0
#define LOWPAN_DISPATCH_FRAGN 0xe0

-struct lowpan_create_arg {
+struct frag_lowpan_compare_key {
u16 tag;
u16 d_size;
- const struct ieee802154_addr *src;
- const struct ieee802154_addr *dst;
+ const struct ieee802154_addr src;
+ const struct ieee802154_addr dst;
};

-/* Equivalent of ipv4 struct ip
+/* Equivalent of ipv4 struct ipq
*/
struct lowpan_frag_queue {
struct inet_frag_queue q;
-
- u16 tag;
- u16 d_size;
- struct ieee802154_addr saddr;
- struct ieee802154_addr daddr;
};

-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
- switch (a->mode) {
- case IEEE802154_ADDR_LONG:
- return (((__force u64)a->extended_addr) >> 32) ^
- (((__force u64)a->extended_addr) & 0xffffffff);
- case IEEE802154_ADDR_SHORT:
- return (__force u32)(a->short_addr + (a->pan_id << 16));
- default:
- return 0;
- }
-}
-
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
void lowpan_net_frag_exit(void);
int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f85b08baff16..1790b65944b3 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -37,55 +37,24 @@ static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
struct sk_buff *prev, struct net_device *ldev);

-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
- const struct ieee802154_addr *saddr,
- const struct ieee802154_addr *daddr)
-{
- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
- return jhash_3words(ieee802154_addr_hash(saddr),
- ieee802154_addr_hash(daddr),
- (__force u32)(tag + (d_size << 16)),
- lowpan_frags.rnd);
-}
-
-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
-{
- const struct lowpan_frag_queue *fq;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
-}
-
-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct lowpan_frag_queue *fq;
- const struct lowpan_create_arg *arg = a;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
- ieee802154_addr_equal(&fq->saddr, arg->src) &&
- ieee802154_addr_equal(&fq->daddr, arg->dst);
-}
-
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
- const struct lowpan_create_arg *arg = a;
+ const struct frag_lowpan_compare_key *key = a;
struct lowpan_frag_queue *fq;

fq = container_of(q, struct lowpan_frag_queue, q);

- fq->tag = arg->tag;
- fq->d_size = arg->d_size;
- fq->saddr = *arg->src;
- fq->daddr = *arg->dst;
+ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
+ memcpy(&q->key, key, sizeof(*key));
}

-static void lowpan_frag_expire(unsigned long data)
+static void lowpan_frag_expire(struct timer_list *t)
{
+ struct inet_frag_queue *frag = from_timer(frag, t, timer);
struct frag_queue *fq;
struct net *net;

- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+ fq = container_of(frag, struct frag_queue, q);
net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

spin_lock(&fq->q.lock);
@@ -93,10 +62,10 @@ static void lowpan_frag_expire(unsigned long data)
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;

- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);
out:
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
}
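
With timer_setup()/from_timer(), the expire callback receives the struct timer_list pointer itself and recovers the enclosing queue via container_of() arithmetic, instead of being handed an opaque unsigned long. A user-space illustration of just the recovery step (no real timer is armed; the types mirror the kernel shapes but are local to the example):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int expires; };

struct frag_queue {
	int id;
	struct timer_list timer;
};

/* What from_timer(frag, t, timer) expands to for this layout. */
static void expire(struct timer_list *t)
{
	struct frag_queue *q = container_of(t, struct frag_queue, timer);

	printf("expired queue %d\n", q->id);
}

int main(void)
{
	struct frag_queue q = { .id = 42 };

	expire(&q.timer);   /* callback sees only the embedded timer */
	return 0;
}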

static inline struct lowpan_frag_queue *
@@ -104,25 +73,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
const struct ieee802154_addr *src,
const struct ieee802154_addr *dst)
{
- struct inet_frag_queue *q;
- struct lowpan_create_arg arg;
- unsigned int hash;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ struct frag_lowpan_compare_key key = {
+ .tag = cb->d_tag,
+ .d_size = cb->d_size,
+ .src = *src,
+ .dst = *dst,
+ };
+ struct inet_frag_queue *q;

- arg.tag = cb->d_tag;
- arg.d_size = cb->d_size;
- arg.src = src;
- arg.dst = dst;
-
- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
-
- q = inet_frag_find(&ieee802154_lowpan->frags,
- &lowpan_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct lowpan_frag_queue, q);
}

@@ -229,7 +193,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
struct sk_buff *fp, *head = fq->q.fragments;
int sum_truesize;

- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);

/* Make the one we just received the head. */
if (prev) {
@@ -437,7 +401,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
ret = lowpan_frag_queue(fq, skb, frag_type);
spin_unlock(&fq->q.lock);

- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
return ret;
}

@@ -447,24 +411,22 @@ err:
}

#ifdef CONFIG_SYSCTL
-static int zero;

static struct ctl_table lowpan_frags_ns_ctl_table[] = {
{
.procname = "6lowpanfrag_high_thresh",
.data = &init_net.ieee802154_lowpan.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
},
{
.procname = "6lowpanfrag_low_thresh",
.data = &init_net.ieee802154_lowpan.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
},
{
@@ -580,14 +542,20 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ int res;

ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+ ieee802154_lowpan->frags.f = &lowpan_frags;

- inet_frags_init_net(&ieee802154_lowpan->frags);
-
- return lowpan_frags_ns_sysctl_register(net);
+ res = inet_frags_init_net(&ieee802154_lowpan->frags);
+ if (res < 0)
+ return res;
+ res = lowpan_frags_ns_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
+ return res;
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
@@ -596,7 +564,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
net_ieee802154_lowpan(net);

lowpan_frags_ns_sysctl_unregister(net);
- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
}

static struct pernet_operations lowpan_frags_ops = {
@@ -604,32 +572,63 @@ static struct pernet_operations lowpan_frags_ops = {
.exit = lowpan_frags_exit_net,
};

-int __init lowpan_net_frag_init(void)
+static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
- int ret;
+ return jhash2(data,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}

- ret = lowpan_frags_sysctl_register();
- if (ret)
- return ret;
+static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;

- ret = register_pernet_subsys(&lowpan_frags_ops);
- if (ret)
- goto err_pernet;
+ return jhash2((const u32 *)&fq->key,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_lowpan_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params lowpan_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = lowpan_key_hashfn,
+ .obj_hashfn = lowpan_obj_hashfn,
+ .obj_cmpfn = lowpan_obj_cmpfn,
+ .automatic_shrinking = true,
+};
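
These rhashtable parameters hash the fixed-size compare key as an array of u32 words (jhash2) and compare entries bytewise, which is why lowpan_frag_init() memcpy()s a fully initialized key into q->key: any uninitialized padding would break both the hash and the compare. A user-space sketch of that hash/compare contract, with FNV-1a standing in for jhash2 (all names local to the example):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct compare_key {          /* stand-in for frag_lowpan_compare_key */
	uint16_t tag;
	uint16_t d_size;
	uint32_t src;
	uint32_t dst;
};

/* Hash the raw key bytes; every byte must be initialized, exactly
 * like the kernel key that is memcpy()ed into place.
 */
static uint32_t key_hash(const struct compare_key *k, uint32_t seed)
{
	const uint8_t *p = (const uint8_t *)k;
	uint32_t h = 2166136261u ^ seed;
	size_t i;

	for (i = 0; i < sizeof(*k); i++) {
		h ^= p[i];
		h *= 16777619u;
	}
	return h;
}

/* 0 on match, non-zero otherwise: the obj_cmpfn convention. */
static int key_cmp(const struct compare_key *a, const struct compare_key *b)
{
	return !!memcmp(a, b, sizeof(*a));
}

int main(void)
{
	struct compare_key a, b;

	memset(&a, 0, sizeof(a));      /* clear padding before hashing */
	a.tag = 7; a.d_size = 1280; a.src = 1; a.dst = 2;
	b = a;
	printf("hash=%08x cmp=%d\n", key_hash(&a, 0), key_cmp(&a, &b));
	return 0;
}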
+
+int __init lowpan_net_frag_init(void)
+{
+ int ret;

- lowpan_frags.hashfn = lowpan_hashfn;
lowpan_frags.constructor = lowpan_frag_init;
lowpan_frags.destructor = NULL;
lowpan_frags.qsize = sizeof(struct frag_queue);
- lowpan_frags.match = lowpan_frag_match;
lowpan_frags.frag_expire = lowpan_frag_expire;
lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+ lowpan_frags.rhash_params = lowpan_rhash_params;
ret = inet_frags_init(&lowpan_frags);
if (ret)
- goto err_pernet;
+ goto out;

+ ret = lowpan_frags_sysctl_register();
+ if (ret)
+ goto err_sysctl;
+
+ ret = register_pernet_subsys(&lowpan_frags_ops);
+ if (ret)
+ goto err_pernet;
+out:
return ret;
err_pernet:
lowpan_frags_sysctl_unregister();
+err_sysctl:
+ inet_frags_fini(&lowpan_frags);
return ret;
}
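
The reordered init path registers pieces in dependency order and unwinds them in reverse when a later step fails, the usual goto-ladder shape. A compilable skeleton of the pattern under invented subsystem names:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; }  /* fails here */
static int init_c(void) { puts("init c"); return 0; }
static void exit_a(void) { puts("exit a"); }
static void exit_b(void) { puts("exit b"); }

static int module_init_like(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;
	ret = init_b();
	if (ret)
		goto err_b;        /* undo a */
	ret = init_c();
	if (ret)
		goto err_c;        /* undo b, then a */
out:
	return ret;
err_c:
	exit_b();
err_b:
	exit_a();
	return ret;
}

int main(void)
{
	return module_init_like() ? 1 : 0;
}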

diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index ba4454ecdf0f..f6764537148c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,12 +25,6 @@
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

-#define INETFRAGS_EVICT_BUCKETS 128
-#define INETFRAGS_EVICT_MAX 512
-
-/* don't rebuild inetfrag table with new secret more often than this */
-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
-
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
* 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
};
EXPORT_SYMBOL(ip_frag_ecn_table);

-static unsigned int
-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
-{
- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
-}
-
-static bool inet_frag_may_rebuild(struct inet_frags *f)
-{
- return time_after(jiffies,
- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
-}
-
-static void inet_frag_secret_rebuild(struct inet_frags *f)
-{
- int i;
-
- write_seqlock_bh(&f->rnd_seqlock);
-
- if (!inet_frag_may_rebuild(f))
- goto out;
-
- get_random_bytes(&f->rnd, sizeof(u32));
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- struct hlist_node *n;
-
- hb = &f->hash[i];
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
- unsigned int hval = inet_frag_hashfn(f, q);
-
- if (hval != i) {
- struct inet_frag_bucket *hb_dest;
-
- hlist_del(&q->list);
-
- /* Relink to new hash chain. */
- hb_dest = &f->hash[hval];
-
- /* This is the only place where we take
- * another chain_lock while already holding
- * one. As this will not run concurrently,
- * we cannot deadlock on hb_dest lock below, if its
- * already locked it will be released soon since
- * other caller cannot be waiting for hb lock
- * that we've taken above.
- */
- spin_lock_nested(&hb_dest->chain_lock,
- SINGLE_DEPTH_NESTING);
- hlist_add_head(&q->list, &hb_dest->chain);
- spin_unlock(&hb_dest->chain_lock);
- }
- }
- spin_unlock(&hb->chain_lock);
- }
-
- f->rebuild = false;
- f->last_rebuild_jiffies = jiffies;
-out:
- write_sequnlock_bh(&f->rnd_seqlock);
-}
-
-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
-{
- if (!hlist_unhashed(&q->list_evictor))
- return false;
-
- return q->net->low_thresh == 0 ||
- frag_mem_limit(q->net) >= q->net->low_thresh;
-}
-
-static unsigned int
-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
-{
- struct inet_frag_queue *fq;
- struct hlist_node *n;
- unsigned int evicted = 0;
- HLIST_HEAD(expired);
-
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
- if (!inet_fragq_should_evict(fq))
- continue;
-
- if (!del_timer(&fq->timer))
- continue;
-
- hlist_add_head(&fq->list_evictor, &expired);
- ++evicted;
- }
-
- spin_unlock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
- f->frag_expire((unsigned long) fq);
-
- return evicted;
-}
-
-static void inet_frag_worker(struct work_struct *work)
-{
- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
- unsigned int i, evicted = 0;
- struct inet_frags *f;
-
- f = container_of(work, struct inet_frags, frags_work);
-
- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
-
- local_bh_disable();
-
- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
- evicted += inet_evict_bucket(f, &f->hash[i]);
- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
- if (evicted > INETFRAGS_EVICT_MAX)
- break;
- }
-
- f->next_bucket = i;
-
- local_bh_enable();
-
- if (f->rebuild && inet_frag_may_rebuild(f))
- inet_frag_secret_rebuild(f);
-}
-
-static void inet_frag_schedule_worker(struct inet_frags *f)
-{
- if (unlikely(!work_pending(&f->frags_work)))
- schedule_work(&f->frags_work);
-}
-
int inet_frags_init(struct inet_frags *f)
{
- int i;
-
- INIT_WORK(&f->frags_work, inet_frag_worker);
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb = &f->hash[i];
-
- spin_lock_init(&hb->chain_lock);
- INIT_HLIST_HEAD(&hb->chain);
- }
-
- seqlock_init(&f->rnd_seqlock);
- f->last_rebuild_jiffies = 0;
f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
NULL);
if (!f->frags_cachep)
@@ -214,83 +59,75 @@ EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
- cancel_work_sync(&f->frags_work);
+ /* We must wait that all inet_frag_destroy_rcu() have completed. */
+ rcu_barrier();
+
kmem_cache_destroy(f->frags_cachep);
+ f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+static void inet_frags_free_cb(void *ptr, void *arg)
{
- unsigned int seq;
- int i;
-
- nf->low_thresh = 0;
+ struct inet_frag_queue *fq = ptr;

-evict_again:
- local_bh_disable();
- seq = read_seqbegin(&f->rnd_seqlock);
-
- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
- inet_evict_bucket(f, &f->hash[i]);
-
- local_bh_enable();
- cond_resched();
-
- if (read_seqretry(&f->rnd_seqlock, seq) ||
- sum_frag_mem_limit(nf))
- goto evict_again;
-}
-EXPORT_SYMBOL(inet_frags_exit_net);
-
-static struct inet_frag_bucket *
-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
-__acquires(hb->chain_lock)
-{
- struct inet_frag_bucket *hb;
- unsigned int seq, hash;
-
- restart:
- seq = read_seqbegin(&f->rnd_seqlock);
-
- hash = inet_frag_hashfn(f, fq);
- hb = &f->hash[hash];
+ /* If we can not cancel the timer, it means this frag_queue
+ * is already disappearing, we have nothing to do.
+ * Otherwise, we own a refcount until the end of this function.
+ */
+ if (!del_timer(&fq->timer))
+ return;

- spin_lock(&hb->chain_lock);
- if (read_seqretry(&f->rnd_seqlock, seq)) {
- spin_unlock(&hb->chain_lock);
- goto restart;
+ spin_lock_bh(&fq->lock);
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+ fq->flags |= INET_FRAG_COMPLETE;
+ refcount_dec(&fq->refcnt);
}
+ spin_unlock_bh(&fq->lock);

- return hb;
+ inet_frag_put(fq);
}

-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frags_exit_net(struct netns_frags *nf)
{
- struct inet_frag_bucket *hb;
+ nf->low_thresh = 0; /* prevent creation of new frags */

- hb = get_frag_bucket_locked(fq, f);
- hlist_del(&fq->list);
- fq->flags |= INET_FRAG_COMPLETE;
- spin_unlock(&hb->chain_lock);
+ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
+EXPORT_SYMBOL(inet_frags_exit_net);

-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frag_kill(struct inet_frag_queue *fq)
{
if (del_timer(&fq->timer))
refcount_dec(&fq->refcnt);

if (!(fq->flags & INET_FRAG_COMPLETE)) {
- fq_unlink(fq, f);
+ struct netns_frags *nf = fq->net;
+
+ fq->flags |= INET_FRAG_COMPLETE;
+ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
refcount_dec(&fq->refcnt);
}
}
EXPORT_SYMBOL(inet_frag_kill);

-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+static void inet_frag_destroy_rcu(struct rcu_head *head)
+{
+ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
+ rcu);
+ struct inet_frags *f = q->net->f;
+
+ if (f->destructor)
+ f->destructor(q);
+ kmem_cache_free(f->frags_cachep, q);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q)
{
struct sk_buff *fp;
struct netns_frags *nf;
unsigned int sum, sum_truesize = 0;
+ struct inet_frags *f;

WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
WARN_ON(del_timer(&q->timer) != 0);
@@ -298,64 +135,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
/* Release all fragment data. */
fp = q->fragments;
nf = q->net;
- while (fp) {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
+ f = nf->f;
+ if (fp) {
+ do {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+ } else {
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
}
sum = sum_truesize + f->qsize;

- if (f->destructor)
- f->destructor(q);
- kmem_cache_free(f->frags_cachep, q);
+ call_rcu(&q->rcu, inet_frag_destroy_rcu);

sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
- struct inet_frag_queue *qp_in,
- struct inet_frags *f,
- void *arg)
-{
- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
- struct inet_frag_queue *qp;
-
-#ifdef CONFIG_SMP
- /* With SMP race we have to recheck hash table, because
- * such entry could have been created on other cpu before
- * we acquired hash bucket lock.
- */
- hlist_for_each_entry(qp, &hb->chain, list) {
- if (qp->net == nf && f->match(qp, arg)) {
- refcount_inc(&qp->refcnt);
- spin_unlock(&hb->chain_lock);
- qp_in->flags |= INET_FRAG_COMPLETE;
- inet_frag_put(qp_in, f);
- return qp;
- }
- }
-#endif
- qp = qp_in;
- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
- refcount_inc(&qp->refcnt);
-
- refcount_inc(&qp->refcnt);
- hlist_add_head(&qp->list, &hb->chain);
-
- spin_unlock(&hb->chain_lock);
-
- return qp;
-}
-
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
struct inet_frags *f,
void *arg)
{
struct inet_frag_queue *q;

+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+ return NULL;
+
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
if (!q)
return NULL;
@@ -364,77 +172,53 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
f->constructor(q, arg);
add_frag_mem_limit(nf, f->qsize);

- setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ timer_setup(&q->timer, f->frag_expire, 0);
spin_lock_init(&q->lock);
- refcount_set(&q->refcnt, 1);
+ refcount_set(&q->refcnt, 3);

return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- struct inet_frags *f,
void *arg)
{
+ struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
+ int err;

q = inet_frag_alloc(nf, f, arg);
if (!q)
return NULL;

- return inet_frag_intern(nf, q, f, arg);
-}
-
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key,
- unsigned int hash)
-{
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- int depth = 0;
+ mod_timer(&q->timer, jiffies + nf->timeout);

- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
- inet_frag_schedule_worker(f);
+ err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
+ f->rhash_params);
+ if (err < 0) {
+ q->flags |= INET_FRAG_COMPLETE;
+ inet_frag_kill(q);
+ inet_frag_destroy(q);
return NULL;
}
+ return q;
+}

- if (frag_mem_limit(nf) > nf->low_thresh)
- inet_frag_schedule_worker(f);
-
- hash &= (INETFRAGS_HASHSZ - 1);
- hb = &f->hash[hash];
-
- spin_lock(&hb->chain_lock);
- hlist_for_each_entry(q, &hb->chain, list) {
- if (q->net == nf && f->match(q, key)) {
- refcount_inc(&q->refcnt);
- spin_unlock(&hb->chain_lock);
- return q;
- }
- depth++;
- }
- spin_unlock(&hb->chain_lock);
+/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
+{
+ struct inet_frag_queue *fq;

- if (depth <= INETFRAGS_MAXDEPTH)
- return inet_frag_create(nf, f, key);
+ rcu_read_lock();

- if (inet_frag_may_rebuild(f)) {
- if (!f->rebuild)
- f->rebuild = true;
- inet_frag_schedule_worker(f);
+ fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (fq) {
+ if (!refcount_inc_not_zero(&fq->refcnt))
+ fq = NULL;
+ rcu_read_unlock();
+ return fq;
}
+ rcu_read_unlock();

- return ERR_PTR(-ENOBUFS);
+ return inet_frag_create(nf, key);
}
EXPORT_SYMBOL(inet_frag_find);
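
The lookup takes its reference with refcount_inc_not_zero(): an entry whose count has already dropped to zero is mid-destruction, so it is treated as absent and the caller falls through to inet_frag_create(). The increment-if-live primitive reduces to a compare-and-swap loop; a user-space approximation follows (this is not the kernel's refcount_t, which additionally saturates on overflow):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (count > 0). */
static bool refcount_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;    /* reference taken */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;                   /* object is being destroyed */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", refcount_inc_not_zero(&live));   /* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying));  /* 0 */
	return 0;
}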
-
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix)
-{
- static const char msg[] = "inet_frag_find: Fragment hash bucket"
- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
- ". Dropping fragment.\n";
-
- if (PTR_ERR(q) == -ENOBUFS)
- net_dbg_ratelimited("%s%s", prefix, msg);
-}
-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4cb1befc3949..e7227128df2c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -57,27 +57,64 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";

-struct ipfrag_skb_cb
-{
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
struct inet_skb_parm h;
- int offset;
+ struct sk_buff *next_frag;
+ int frag_run_len;
};

-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void ip4_frag_init_run(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ ip4_frag_init_run(skb);
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
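
A "run" is a maximal chain of byte-adjacent fragments: only the run head lives in the rb-tree, carrying the summed length, while followers hang off next_frag in skb->cb. A minimal user-space model of the append step; the struct mirrors ipfrag_skb_cb in spirit but is invented for the example:

#include <stdio.h>

/* Simplified fragment: offset/len plus the two run-tracking fields
 * the kernel keeps in skb->cb.
 */
struct frag {
	int offset, len;
	struct frag *next_frag;   /* next member of the same run */
	int frag_run_len;         /* valid at the head of a run */
};

/* Append a fragment that starts exactly where the last run ends. */
static void append_to_last_run(struct frag *run_head, struct frag **tail,
			       struct frag *f)
{
	f->next_frag = NULL;
	run_head->frag_run_len += f->len;   /* head tracks total bytes */
	(*tail)->next_frag = f;
	*tail = f;
}

int main(void)
{
	struct frag a = { .offset = 0,    .len = 1480, .frag_run_len = 1480 };
	struct frag b = { .offset = 1480, .len = 1480 };
	struct frag *tail = &a;

	append_to_last_run(&a, &tail, &b);
	printf("run covers [%d, %d)\n", a.offset, a.offset + a.frag_run_len);
	return 0;
}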

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;

- u32 user;
- __be32 saddr;
- __be32 daddr;
- __be16 id;
- u8 protocol;
u8 ecn; /* RFC3168 support */
u16 max_df_size; /* largest frag with DF set seen */
int iif;
- int vif; /* L3 master device index */
unsigned int rid;
struct inet_peer *peer;
};
@@ -89,49 +126,9 @@ static u8 ip4_frag_ecn(u8 tos)

static struct inet_frags ip4_frags;

-int ip_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv4.frags);
-}
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev);
-
-struct ip4_create_arg {
- struct iphdr *iph;
- u32 user;
- int vif;
-};
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);

-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
-{
- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
- return jhash_3words((__force u32)id << 16 | prot,
- (__force u32)saddr, (__force u32)daddr,
- ip4_frags.rnd);
-}
-
-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
-{
- const struct ipq *ipq;
-
- ipq = container_of(q, struct ipq, q);
- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
-}
-
-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct ipq *qp;
- const struct ip4_create_arg *arg = a;
-
- qp = container_of(q, struct ipq, q);
- return qp->id == arg->iph->id &&
- qp->saddr == arg->iph->saddr &&
- qp->daddr == arg->iph->daddr &&
- qp->protocol == arg->iph->protocol &&
- qp->user == arg->user &&
- qp->vif == arg->vif;
-}

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
@@ -140,17 +137,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
frags);
struct net *net = container_of(ipv4, struct net, ipv4);

- const struct ip4_create_arg *arg = a;
+ const struct frag_v4_compare_key *key = a;

- qp->protocol = arg->iph->protocol;
- qp->id = arg->iph->id;
- qp->ecn = ip4_frag_ecn(arg->iph->tos);
- qp->saddr = arg->iph->saddr;
- qp->daddr = arg->iph->daddr;
- qp->vif = arg->vif;
- qp->user = arg->user;
+ q->key.v4 = *key;
+ qp->ecn = 0;
qp->peer = q->net->max_dist ?
- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
NULL;
}

@@ -168,7 +160,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)

static void ipq_put(struct ipq *ipq)
{
- inet_frag_put(&ipq->q, &ip4_frags);
+ inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
@@ -176,7 +168,7 @@ static void ipq_put(struct ipq *ipq)
*/
static void ipq_kill(struct ipq *ipq)
{
- inet_frag_kill(&ipq->q, &ip4_frags);
+ inet_frag_kill(&ipq->q);
}

static bool frag_expire_skip_icmp(u32 user)
@@ -191,12 +183,16 @@ static bool frag_expire_skip_icmp(u32 user)
/*
* Oops, a fragment queue timed out. Kill it and send an ICMP reply.
*/
-static void ip_expire(unsigned long arg)
+static void ip_expire(struct timer_list *t)
{
- struct ipq *qp;
+ struct inet_frag_queue *frag = from_timer(frag, t, timer);
+ const struct iphdr *iph;
+ struct sk_buff *head = NULL;
struct net *net;
+ struct ipq *qp;
+ int err;

- qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
+ qp = container_of(frag, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);

rcu_read_lock();
@@ -207,51 +203,65 @@ static void ip_expire(unsigned long arg)

ipq_kill(qp);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

- if (!inet_frag_evicting(&qp->q)) {
- struct sk_buff *clone, *head = qp->q.fragments;
- const struct iphdr *iph;
- int err;
-
- __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
+ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
+ goto out;

- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ if (qp->q.fragments) {
+ head = qp->q.fragments;
+ qp->q.fragments = head->next;
+ } else {
+ head = skb_rb_first(&qp->q.rb_fragments);
+ if (!head)
goto out;
+ if (FRAG_CB(head)->next_frag)
+ rb_replace_node(&head->rbnode,
+ &FRAG_CB(head)->next_frag->rbnode,
+ &qp->q.rb_fragments);
+ else
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+ if (head == qp->q.fragments_tail)
+ qp->q.fragments_tail = NULL;

- head->dev = dev_get_by_index_rcu(net, qp->iif);
- if (!head->dev)
- goto out;
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+ goto out;


- /* skb has no dst, perform route lookup again */
- iph = ip_hdr(head);
- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
- if (err)
- goto out;
+ if (err)
+ goto out;

- /* Only an end host needs to send an ICMP
- * "Fragment Reassembly Timeout" message, per RFC792.
- */
- if (frag_expire_skip_icmp(qp->user) &&
- (skb_rtable(head)->rt_type != RTN_LOCAL))
- goto out;
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;

- clone = skb_clone(head, GFP_ATOMIC);
+ spin_unlock(&qp->q.lock);
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+ goto out_rcu_unlock;

- /* Send an ICMP "Fragment Reassembly Timeout" message. */
- if (clone) {
- spin_unlock(&qp->q.lock);
- icmp_send(clone, ICMP_TIME_EXCEEDED,
- ICMP_EXC_FRAGTIME, 0);
- consume_skb(clone);
- goto out_rcu_unlock;
- }
- }
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
+ if (head)
+ kfree_skb(head);
ipq_put(qp);
}

@@ -261,21 +271,20 @@ out_rcu_unlock:
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
u32 user, int vif)
{
+ struct frag_v4_compare_key key = {
+ .saddr = iph->saddr,
+ .daddr = iph->daddr,
+ .user = user,
+ .vif = vif,
+ .id = iph->id,
+ .protocol = iph->protocol,
+ };
struct inet_frag_queue *q;
- struct ip4_create_arg arg;
- unsigned int hash;
-
- arg.iph = iph;
- arg.user = user;
- arg.vif = vif;

- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
-
- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->ipv4.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct ipq, q);
}

@@ -295,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
end = atomic_inc_return(&peer->rid);
qp->rid = end;

- rc = qp->q.fragments && (end - start) > max;
+ rc = qp->q.fragments_tail && (end - start) > max;

if (rc) {
struct net *net;
@@ -309,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)

static int ip_frag_reinit(struct ipq *qp)
{
- struct sk_buff *fp;
unsigned int sum_truesize = 0;

if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -317,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
return -ETIMEDOUT;
}

- fp = qp->q.fragments;
- do {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
- } while (fp);
+ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
sub_frag_mem_limit(qp->q.net, sum_truesize);

qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
qp->iif = 0;
qp->ecn = 0;

@@ -341,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp)
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
- struct sk_buff *prev, *next;
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+ struct sk_buff *skb1, *prev_tail;
struct net_device *dev;
unsigned int fragsize;
int flags, offset;
@@ -404,99 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (err)
goto err;

- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = qp->q.fragments_tail;
- if (!prev || FRAG_CB(prev)->offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = qp->q.fragments; next != NULL; next = next->next) {
- if (FRAG_CB(next)->offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
+ /* Makes sure compiler won't do silly aliasing games */
+ barrier();
+
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * We do the same here for IPv4 (and increment an snmp counter).
*/
- if (prev) {
- int i = (FRAG_CB(prev)->offset + prev->len) - offset;

- if (i > 0) {
- offset += i;
- err = -EINVAL;
- if (end <= offset)
- goto err;
- err = -ENOMEM;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
- err = -ENOMEM;
-
- while (next && FRAG_CB(next)->offset < end) {
- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- int delta = -next->truesize;
-
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- delta += next->truesize;
- if (delta)
- add_frag_mem_limit(qp->q.net, delta);
- FRAG_CB(next)->offset += i;
- qp->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- qp->q.fragments = next;
-
- qp->q.meat -= free_it->len;
- sub_frag_mem_limit(qp->q.net, free_it->truesize);
- kfree_skb(free_it);
- }
+ /* Find out where to put this fragment. */
+ prev_tail = qp->q.fragments_tail;
+ if (!prev_tail)
+ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
+ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+ goto discard_qp;
+ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+ ip4_frag_append_to_last_run(&qp->q, skb);
+ else
+ ip4_frag_create_run(&qp->q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ rbn = &qp->q.rb_fragments.rb_node;
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= skb1->ip_defrag_offset +
+ FRAG_CB(skb1)->frag_run_len)
+ rbn = &parent->rb_right;
+ else /* Found an overlap with skb1. */
+ goto discard_qp;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ ip4_frag_init_run(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
}

- FRAG_CB(skb)->offset = offset;
-
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- qp->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- qp->q.fragments = skb;
-
- dev = skb->dev;
- if (dev) {
+ if (dev)
qp->iif = dev->ifindex;
- skb->dev = NULL;
- }
+ skb->ip_defrag_offset = offset;
+
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
qp->ecn |= ecn;
@@ -518,7 +485,7 @@ found:
unsigned long orefdst = skb->_skb_refdst;

skb->_skb_refdst = 0UL;
- err = ip_frag_reasm(qp, prev, dev);
+ err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
return err;
}

skb_dst_drop(skb);
return -EINPROGRESS;

+discard_qp:
+ inet_frag_kill(&qp->q);
+ err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
}

-
/* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = qp->q.fragments;
+ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+ struct sk_buff **nextp; /* To build frag_list. */
+ struct rb_node *rbn;
int len;
int ihlen;
int err;
@@ -553,26 +524,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_fail;
}
/* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
if (!fp)
goto out_nomem;
-
- fp->next = head->next;
- if (!fp->next)
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(prev_tail)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &qp->q.rb_fragments);
+ if (qp->q.fragments_tail == skb)
qp->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, qp->q.fragments);
- head->next = qp->q.fragments->next;
-
- consume_skb(qp->q.fragments);
- qp->q.fragments = head;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &qp->q.rb_fragments);
+ consume_skb(head);
+ head = skb;
}

- WARN_ON(!head);
- WARN_ON(FRAG_CB(head)->offset != 0);
+ WARN_ON(head->ip_defrag_offset != 0);

/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
@@ -596,35 +568,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_nomem;
- clone->next = head->next;
- head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
+ head->truesize += clone->truesize;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(qp->q.net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
}

- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));

- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &qp->q.rb_fragments);
+ rbn = rbnext;
+ }
}
sub_frag_mem_limit(qp->q.net, head->truesize);

+ *nextp = NULL;
head->next = NULL;
+ head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -652,7 +650,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,

__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
return 0;

out_nomem:
@@ -660,7 +660,7 @@ out_nomem:
err = -ENOMEM;
goto out_fail;
out_oversize:
- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
|
|
out_fail:
|
|
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
|
|
return err;
|
|
@@ -734,25 +734,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
|
|
}
|
|
EXPORT_SYMBOL(ip_check_defrag);
|
|
|
|
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
|
|
+{
|
|
+ struct rb_node *p = rb_first(root);
|
|
+ unsigned int sum = 0;
|
|
+
|
|
+ while (p) {
|
|
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
|
|
+
|
|
+ p = rb_next(p);
|
|
+ rb_erase(&skb->rbnode, root);
|
|
+ while (skb) {
|
|
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
|
|
+
|
|
+ sum += skb->truesize;
|
|
+ kfree_skb(skb);
|
|
+ skb = next;
|
|
+ }
|
|
+ }
|
|
+ return sum;
|
|
+}
|
|
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
|
|
+
|
|
#ifdef CONFIG_SYSCTL
|
|
-static int zero;
|
|
+static int dist_min;
|
|
|
|
static struct ctl_table ip4_frags_ns_ctl_table[] = {
|
|
{
|
|
.procname = "ipfrag_high_thresh",
|
|
.data = &init_net.ipv4.frags.high_thresh,
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0644,
|
|
- .proc_handler = proc_dointvec_minmax,
|
|
+ .proc_handler = proc_doulongvec_minmax,
|
|
.extra1 = &init_net.ipv4.frags.low_thresh
|
|
},
|
|
{
|
|
.procname = "ipfrag_low_thresh",
|
|
.data = &init_net.ipv4.frags.low_thresh,
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0644,
|
|
- .proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &zero,
|
|
+ .proc_handler = proc_doulongvec_minmax,
|
|
.extra2 = &init_net.ipv4.frags.high_thresh
|
|
},
|
|
{
|
|
@@ -768,7 +789,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
|
|
.maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &zero
|
|
+ .extra1 = &dist_min,
|
|
},
|
|
{ }
|
|
};
|
|
@@ -850,6 +871,8 @@ static void __init ip4_frags_ctl_register(void)
|
|
|
|
static int __net_init ipv4_frags_init_net(struct net *net)
|
|
{
|
|
+ int res;
|
|
+
|
|
/* Fragment cache limits.
|
|
*
|
|
* The fragment memory accounting code, (tries to) account for
|
|
@@ -874,16 +897,21 @@ static int __net_init ipv4_frags_init_net(struct net *net)
|
|
net->ipv4.frags.timeout = IP_FRAG_TIME;
|
|
|
|
net->ipv4.frags.max_dist = 64;
|
|
-
|
|
- inet_frags_init_net(&net->ipv4.frags);
|
|
-
|
|
- return ip4_frags_ns_ctl_register(net);
|
|
+ net->ipv4.frags.f = &ip4_frags;
|
|
+
|
|
+ res = inet_frags_init_net(&net->ipv4.frags);
|
|
+ if (res < 0)
|
|
+ return res;
|
|
+ res = ip4_frags_ns_ctl_register(net);
|
|
+ if (res < 0)
|
|
+ inet_frags_exit_net(&net->ipv4.frags);
|
|
+ return res;
|
|
}
|
|
|
|
static void __net_exit ipv4_frags_exit_net(struct net *net)
|
|
{
|
|
ip4_frags_ns_ctl_unregister(net);
|
|
- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
|
|
+ inet_frags_exit_net(&net->ipv4.frags);
|
|
}
|
|
|
|
static struct pernet_operations ip4_frags_ops = {
|
|
@@ -891,17 +919,49 @@ static struct pernet_operations ip4_frags_ops = {
|
|
.exit = ipv4_frags_exit_net,
|
|
};
|
|
|
|
+
|
|
+static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
|
|
+{
|
|
+ return jhash2(data,
|
|
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
|
|
+}
|
|
+
|
|
+static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
|
|
+{
|
|
+ const struct inet_frag_queue *fq = data;
|
|
+
|
|
+ return jhash2((const u32 *)&fq->key.v4,
|
|
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
|
|
+}
|
|
+
|
|
+static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
|
|
+{
|
|
+ const struct frag_v4_compare_key *key = arg->key;
|
|
+ const struct inet_frag_queue *fq = ptr;
|
|
+
|
|
+ return !!memcmp(&fq->key, key, sizeof(*key));
|
|
+}
|
|
+
|
|
+static const struct rhashtable_params ip4_rhash_params = {
|
|
+ .head_offset = offsetof(struct inet_frag_queue, node),
|
|
+ .key_offset = offsetof(struct inet_frag_queue, key),
|
|
+ .key_len = sizeof(struct frag_v4_compare_key),
|
|
+ .hashfn = ip4_key_hashfn,
|
|
+ .obj_hashfn = ip4_obj_hashfn,
|
|
+ .obj_cmpfn = ip4_obj_cmpfn,
|
|
+ .automatic_shrinking = true,
|
|
+};
|
|
+
|
|
void __init ipfrag_init(void)
|
|
{
|
|
- ip4_frags_ctl_register();
|
|
- register_pernet_subsys(&ip4_frags_ops);
|
|
- ip4_frags.hashfn = ip4_hashfn;
|
|
ip4_frags.constructor = ip4_frag_init;
|
|
ip4_frags.destructor = ip4_frag_free;
|
|
ip4_frags.qsize = sizeof(struct ipq);
|
|
- ip4_frags.match = ip4_frag_match;
|
|
ip4_frags.frag_expire = ip_expire;
|
|
ip4_frags.frags_cache_name = ip_frag_cache_name;
|
|
+ ip4_frags.rhash_params = ip4_rhash_params;
|
|
if (inet_frags_init(&ip4_frags))
|
|
panic("IP: failed to allocate ip4_frags cache\n");
|
|
+ ip4_frags_ctl_register();
|
|
+ register_pernet_subsys(&ip4_frags_ops);
|
|
}
|
|
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 127153f1ed8a..3fbf688a1943 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -54,7 +54,6 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
-	unsigned int frag_mem;
 	int orphans, sockets;

 	orphans = percpu_counter_sum_positive(&tcp_orphan_count);
@@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
 		   sock_prot_inuse_get(net, &raw_prot));
-	frag_mem = ip_frag_mem(net);
-	seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
+	seq_printf(seq, "FRAG: inuse %u memory %lu\n",
+		   atomic_read(&net->ipv4.frags.rhashtable.nelems),
+		   frag_mem_limit(&net->ipv4.frags));
 	return 0;
 }

@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
 	SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
 	SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
 	SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+	SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
 	SNMP_MIB_SENTINEL
 };

diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index fbbeda647774..0567edb76522 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node *p;
-	struct sk_buff *skb;
 	struct dst_entry *dst;
+	struct sk_buff *skb;

 	if (!tp->syn_fastopen)
 		return;

 	if (!tp->data_segs_in) {
-		p = rb_first(&tp->out_of_order_queue);
-		if (p && !rb_next(p)) {
-			skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = skb_rb_first(&tp->out_of_order_queue);
+		if (skb && !skb_rb_next(skb)) {
 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
 				tcp_fastopen_active_disable(sk);
 				return;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bdabd748f4bc..991f382afc1b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)

 	p = rb_first(&tp->out_of_order_queue);
 	while (p) {
-		skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = rb_to_skb(p);
 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
 			break;

@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node **p, *q, *parent;
+	struct rb_node **p, *parent;
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 	bool fragstolen;
@@ -4503,7 +4503,7 @@ coalesce_done:
 	parent = NULL;
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
 			p = &parent->rb_left;
 			continue;
@@ -4548,9 +4548,7 @@ insert:

merge_right:
 	/* Remove other segments covered by skb. */
-	while ((q = rb_next(&skb->rbnode)) != NULL) {
-		skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+	while ((skb1 = skb_rb_next(skb)) != NULL) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
 			break;
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4565,7 +4563,7 @@ merge_right:
 		tcp_drop(sk, skb1);
 	}
 	/* If there is no skb after us, we are the last_skb ! */
-	if (!q)
+	if (!skb1)
 		tp->ooo_last_skb = skb;

add_sack:
@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
 	if (list)
 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;

-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+	return skb_rb_next(skb);
 }

 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)

 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
 			p = &parent->rb_left;
 		else
@@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
-	struct rb_node *p;
 	u32 start, end;

-	p = rb_first(&tp->out_of_order_queue);
-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
+	skb = skb_rb_first(&tp->out_of_order_queue);
new_range:
 	if (!skb) {
-		p = rb_last(&tp->out_of_order_queue);
-		/* Note: This is possible p is NULL here. We do not
-		 * use rb_entry_safe(), as ooo_last_skb is valid only
-		 * if rbtree is not empty.
-		 */
-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
 		return;
 	}
 	start = TCP_SKB_CB(skb)->seq;
@@ -4918,7 +4909,7 @@ new_range:
 	range_truesize = skb->truesize;

 	for (head = skb;;) {
-		skb = tcp_skb_next(skb, NULL);
+		skb = skb_rb_next(skb);

 		/* Range is terminated when we see a gap or when
 		 * we are at the queue end.
@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
 		goal -= rb_to_skb(node)->truesize;
-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+		tcp_drop(sk, rb_to_skb(node));
 		if (!prev || goal <= 0) {
 			sk_mem_reclaim(sk);
 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 		}
 		node = prev;
 	} while (node);
-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+	tp->ooo_last_skb = rb_to_skb(prev);

 	/* Reset SACK state. A conforming SACK implementation will
 	 * do the same at a timeout based retransmit. When a connection
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index ee33a6743f3b..2ed8536e10b6 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb
 static struct inet_frags nf_frags;

 #ifdef CONFIG_SYSCTL
-static int zero;

 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 	{
@@ -76,18 +75,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 	{
 		.procname = "nf_conntrack_frag6_low_thresh",
 		.data = &init_net.nf_frag.frags.low_thresh,
-		.maxlen = sizeof(unsigned int),
+		.maxlen = sizeof(unsigned long),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
-		.extra1 = &zero,
+		.proc_handler = proc_doulongvec_minmax,
 		.extra2 = &init_net.nf_frag.frags.high_thresh
 	},
 	{
 		.procname = "nf_conntrack_frag6_high_thresh",
 		.data = &init_net.nf_frag.frags.high_thresh,
-		.maxlen = sizeof(unsigned int),
+		.maxlen = sizeof(unsigned long),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
+		.proc_handler = proc_doulongvec_minmax,
 		.extra1 = &init_net.nf_frag.frags.low_thresh
 	},
 	{ }
@@ -152,59 +150,35 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }

-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
-				 const struct in6_addr *daddr)
-{
-	net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
-	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			    (__force u32)id, nf_frags.rnd);
-}
-
-
-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
-{
-	const struct frag_queue *nq;
-
-	nq = container_of(q, struct frag_queue, q);
-	return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
-}
-
-static void nf_ct_frag6_expire(unsigned long data)
+static void nf_ct_frag6_expire(struct timer_list *t)
 {
+	struct inet_frag_queue *frag = from_timer(frag, t, timer);
 	struct frag_queue *fq;
 	struct net *net;

-	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	fq = container_of(frag, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, nf_frag.frags);

-	ip6_expire_frag_queue(net, fq, &nf_frags);
+	ip6_expire_frag_queue(net, fq);
 }

 /* Creation primitives. */
-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
-					 u32 user, struct in6_addr *src,
-					 struct in6_addr *dst, int iif, u8 ecn)
+static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+				  const struct ipv6hdr *hdr, int iif)
 {
+	struct frag_v6_compare_key key = {
+		.id = id,
+		.saddr = hdr->saddr,
+		.daddr = hdr->daddr,
+		.user = user,
+		.iif = iif,
+	};
 	struct inet_frag_queue *q;
-	struct ip6_create_arg arg;
-	unsigned int hash;
-
-	arg.id = id;
-	arg.user = user;
-	arg.src = src;
-	arg.dst = dst;
-	arg.iif = iif;
-	arg.ecn = ecn;
-
-	local_bh_disable();
-	hash = nf_hash_frag(id, src, dst);
-
-	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
-	local_bh_enable();
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+
+	q = inet_frag_find(&net->nf_frag.frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct frag_queue, q);
 }

@@ -263,7 +237,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			 * this case. -DaveM
 			 */
 			pr_debug("end of fragment not rounded to 8 bytes.\n");
-			inet_frag_kill(&fq->q, &nf_frags);
+			inet_frag_kill(&fq->q);
 			return -EPROTO;
 		}
 		if (end > fq->q.len) {
@@ -356,7 +330,7 @@ found:
 	return 0;

discard_fq:
-	inet_frag_kill(&fq->q, &nf_frags);
+	inet_frag_kill(&fq->q);
err:
 	return -EINVAL;
 }
@@ -378,7 +352,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 	int payload_len;
 	u8 ecn;

-	inet_frag_kill(&fq->q, &nf_frags);
+	inet_frag_kill(&fq->q);

 	WARN_ON(head == NULL);
 	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -479,6 +453,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
+		fp->sk = NULL;
 	}
 	sub_frag_mem_limit(fq->q.net, head->truesize);

@@ -497,6 +472,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 					  head->csum);

 	fq->q.fragments = NULL;
+	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;

 	return true;
@@ -591,9 +567,13 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);

+	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+	    fhdr->frag_off & htons(IP6_MF))
+		return -EINVAL;
+
 	skb_orphan(skb);
-	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+	fq = fq_find(net, fhdr->identification, user, hdr,
+		     skb->dev ? skb->dev->ifindex : 0);
 	if (fq == NULL) {
 		pr_debug("Can't find and can't create new queue\n");
 		return -ENOMEM;
@@ -623,25 +603,33 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)

out_unlock:
 	spin_unlock_bh(&fq->q.lock);
-	inet_frag_put(&fq->q, &nf_frags);
+	inet_frag_put(&fq->q);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

 static int nf_ct_net_init(struct net *net)
 {
+	int res;
+
 	net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
-	inet_frags_init_net(&net->nf_frag.frags);
-
-	return nf_ct_frag6_sysctl_register(net);
+	net->nf_frag.frags.f = &nf_frags;
+
+	res = inet_frags_init_net(&net->nf_frag.frags);
+	if (res < 0)
+		return res;
+	res = nf_ct_frag6_sysctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&net->nf_frag.frags);
+	return res;
 }

 static void nf_ct_net_exit(struct net *net)
 {
 	nf_ct_frags6_sysctl_unregister(net);
-	inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+	inet_frags_exit_net(&net->nf_frag.frags);
 }

 static struct pernet_operations nf_ct_net_ops = {
@@ -653,13 +641,12 @@ int nf_ct_frag6_init(void)
 {
 	int ret = 0;

-	nf_frags.hashfn = nf_hashfn;
 	nf_frags.constructor = ip6_frag_init;
 	nf_frags.destructor = NULL;
 	nf_frags.qsize = sizeof(struct frag_queue);
-	nf_frags.match = ip6_frag_match;
 	nf_frags.frag_expire = nf_ct_frag6_expire;
 	nf_frags.frags_cache_name = nf_frags_cache_name;
+	nf_frags.rhash_params = ip6_rhash_params;
 	ret = inet_frags_init(&nf_frags);
 	if (ret)
 		goto out;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8ff0fd..dc04c024986c 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,6 @@
 static int sockstat6_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
-	unsigned int frag_mem = ip6_frag_mem(net);

 	seq_printf(seq, "TCP6: inuse %d\n",
 		   sock_prot_inuse_get(net, &tcpv6_prot));
@@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
 		   sock_prot_inuse_get(net, &udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
 		   sock_prot_inuse_get(net, &rawv6_prot));
-	seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
+	seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
+		   atomic_read(&net->ipv6.frags.rhashtable.nelems),
+		   frag_mem_limit(&net->ipv6.frags));
 	return 0;
 }

diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 846012eae526..ede0061b6f5d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,130 +79,93 @@ static struct inet_frags ip6_frags;
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev);

-/*
- * callers should be careful not to use the hash value outside the ipfrag_lock
- * as doing so could race with ipfrag_hash_rnd being recalculated.
- */
-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-				    const struct in6_addr *daddr)
-{
-	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
-	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			    (__force u32)id, ip6_frags.rnd);
-}
-
-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
-{
-	const struct frag_queue *fq;
-
-	fq = container_of(q, struct frag_queue, q);
-	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
-}
-
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
-{
-	const struct frag_queue *fq;
-	const struct ip6_create_arg *arg = a;
-
-	fq = container_of(q, struct frag_queue, q);
-	return	fq->id == arg->id &&
-		fq->user == arg->user &&
-		ipv6_addr_equal(&fq->saddr, arg->src) &&
-		ipv6_addr_equal(&fq->daddr, arg->dst) &&
-		(arg->iif == fq->iif ||
-		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
-					       IPV6_ADDR_LINKLOCAL)));
-}
-EXPORT_SYMBOL(ip6_frag_match);
-
 void ip6_frag_init(struct inet_frag_queue *q, const void *a)
 {
 	struct frag_queue *fq = container_of(q, struct frag_queue, q);
-	const struct ip6_create_arg *arg = a;
+	const struct frag_v6_compare_key *key = a;

-	fq->id = arg->id;
-	fq->user = arg->user;
-	fq->saddr = *arg->src;
-	fq->daddr = *arg->dst;
-	fq->ecn = arg->ecn;
+	q->key.v6 = *key;
+	fq->ecn = 0;
 }
 EXPORT_SYMBOL(ip6_frag_init);

-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
-			   struct inet_frags *frags)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
 {
 	struct net_device *dev = NULL;
+	struct sk_buff *head;

+	rcu_read_lock();
 	spin_lock(&fq->q.lock);

 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto out;

-	inet_frag_kill(&fq->q, frags);
+	inet_frag_kill(&fq->q);

-	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, fq->iif);
 	if (!dev)
-		goto out_rcu_unlock;
+		goto out;

 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-
-	if (inet_frag_evicting(&fq->q))
-		goto out_rcu_unlock;
-
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

 	/* Don't send error if the first segment did not arrive. */
-	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
-		goto out_rcu_unlock;
+	head = fq->q.fragments;
+	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+		goto out;

 	/* But use as source device on which LAST ARRIVED
 	 * segment was received. And do not use fq->dev
 	 * pointer directly, device might already disappeared.
 	 */
-	fq->q.fragments->dev = dev;
-	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-	rcu_read_unlock();
+	head->dev = dev;
+	skb_get(head);
+	spin_unlock(&fq->q.lock);
+
+	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+	kfree_skb(head);
+	goto out_rcu_unlock;
+
out:
 	spin_unlock(&fq->q.lock);
-	inet_frag_put(&fq->q, frags);
+out_rcu_unlock:
+	rcu_read_unlock();
+	inet_frag_put(&fq->q);
 }
 EXPORT_SYMBOL(ip6_expire_frag_queue);

-static void ip6_frag_expire(unsigned long data)
+static void ip6_frag_expire(struct timer_list *t)
 {
+	struct inet_frag_queue *frag = from_timer(frag, t, timer);
 	struct frag_queue *fq;
 	struct net *net;

-	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	fq = container_of(frag, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, ipv6.frags);

-	ip6_expire_frag_queue(net, fq, &ip6_frags);
+	ip6_expire_frag_queue(net, fq);
 }

 static struct frag_queue *
-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-	const struct in6_addr *dst, int iif, u8 ecn)
+fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
 {
+	struct frag_v6_compare_key key = {
+		.id = id,
+		.saddr = hdr->saddr,
+		.daddr = hdr->daddr,
+		.user = IP6_DEFRAG_LOCAL_DELIVER,
+		.iif = iif,
+	};
 	struct inet_frag_queue *q;
-	struct ip6_create_arg arg;
-	unsigned int hash;

-	arg.id = id;
-	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
-	arg.src = src;
-	arg.dst = dst;
-	arg.iif = iif;
-	arg.ecn = ecn;
+	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+					    IPV6_ADDR_LINKLOCAL)))
+		key.iif = 0;

-	hash = inet6_hash_frag(id, src, dst);
-
-	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+	q = inet_frag_find(&net->ipv6.frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct frag_queue, q);
 }

@@ -363,7 +326,7 @@ found:
 	return -1;

discard_fq:
-	inet_frag_kill(&fq->q, &ip6_frags);
+	inet_frag_kill(&fq->q);
err:
 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			IPSTATS_MIB_REASMFAILS);
@@ -390,7 +353,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	int sum_truesize;
 	u8 ecn;

-	inet_frag_kill(&fq->q, &ip6_frags);
+	inet_frag_kill(&fq->q);

 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
@@ -509,6 +472,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
+	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
 	return 1;

@@ -530,6 +494,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	struct frag_queue *fq;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct net *net = dev_net(skb_dst(skb)->dev);
+	int iif;

 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;
@@ -558,17 +523,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return 1;
 	}

-	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+	    fhdr->frag_off & htons(IP6_MF))
+		goto fail_hdr;
+
+	iif = skb->dev ? skb->dev->ifindex : 0;
+	fq = fq_find(net, fhdr->identification, hdr, iif);
 	if (fq) {
 		int ret;

 		spin_lock(&fq->q.lock);

+		fq->iif = iif;
 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

 		spin_unlock(&fq->q.lock);
-		inet_frag_put(&fq->q, &ip6_frags);
+		inet_frag_put(&fq->q);
 		return ret;
 	}

@@ -589,24 +559,22 @@ static const struct inet6_protocol frag_protocol = {
 };

 #ifdef CONFIG_SYSCTL
-static int zero;

 static struct ctl_table ip6_frags_ns_ctl_table[] = {
 	{
 		.procname = "ip6frag_high_thresh",
 		.data = &init_net.ipv6.frags.high_thresh,
-		.maxlen = sizeof(int),
+		.maxlen = sizeof(unsigned long),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
+		.proc_handler = proc_doulongvec_minmax,
 		.extra1 = &init_net.ipv6.frags.low_thresh
 	},
 	{
 		.procname = "ip6frag_low_thresh",
 		.data = &init_net.ipv6.frags.low_thresh,
-		.maxlen = sizeof(int),
+		.maxlen = sizeof(unsigned long),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
-		.extra1 = &zero,
+		.proc_handler = proc_doulongvec_minmax,
 		.extra2 = &init_net.ipv6.frags.high_thresh
 	},
 	{
@@ -649,10 +617,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
 		table[1].data = &net->ipv6.frags.low_thresh;
 		table[1].extra2 = &net->ipv6.frags.high_thresh;
 		table[2].data = &net->ipv6.frags.timeout;
-
-		/* Don't export sysctls to unprivileged users */
-		if (net->user_ns != &init_user_ns)
-			table[0].procname = NULL;
 	}

 	hdr = register_net_sysctl(net, "net/ipv6", table);
@@ -714,19 +678,27 @@ static void ip6_frags_sysctl_unregister(void)

 static int __net_init ipv6_frags_init_net(struct net *net)
 {
+	int res;
+
 	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+	net->ipv6.frags.f = &ip6_frags;

-	inet_frags_init_net(&net->ipv6.frags);
+	res = inet_frags_init_net(&net->ipv6.frags);
+	if (res < 0)
+		return res;

-	return ip6_frags_ns_sysctl_register(net);
+	res = ip6_frags_ns_sysctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&net->ipv6.frags);
+	return res;
 }

 static void __net_exit ipv6_frags_exit_net(struct net *net)
 {
 	ip6_frags_ns_sysctl_unregister(net);
-	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
+	inet_frags_exit_net(&net->ipv6.frags);
 }

 static struct pernet_operations ip6_frags_ops = {
@@ -734,14 +706,55 @@ static struct pernet_operations ip6_frags_ops = {
 	.exit = ipv6_frags_exit_net,
 };

+static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	return jhash2(data,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct inet_frag_queue *fq = data;
+
+	return jhash2((const u32 *)&fq->key.v6,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+	const struct frag_v6_compare_key *key = arg->key;
+	const struct inet_frag_queue *fq = ptr;
+
+	return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+const struct rhashtable_params ip6_rhash_params = {
+	.head_offset = offsetof(struct inet_frag_queue, node),
+	.hashfn = ip6_key_hashfn,
+	.obj_hashfn = ip6_obj_hashfn,
+	.obj_cmpfn = ip6_obj_cmpfn,
+	.automatic_shrinking = true,
+};
+EXPORT_SYMBOL(ip6_rhash_params);
+
 int __init ipv6_frag_init(void)
 {
 	int ret;

-	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+	ip6_frags.constructor = ip6_frag_init;
+	ip6_frags.destructor = NULL;
+	ip6_frags.qsize = sizeof(struct frag_queue);
+	ip6_frags.frag_expire = ip6_frag_expire;
+	ip6_frags.frags_cache_name = ip6_frag_cache_name;
+	ip6_frags.rhash_params = ip6_rhash_params;
+	ret = inet_frags_init(&ip6_frags);
 	if (ret)
 		goto out;

+	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+	if (ret)
+		goto err_protocol;
+
 	ret = ip6_frags_sysctl_register();
 	if (ret)
 		goto err_sysctl;
@@ -750,16 +763,6 @@ int __init ipv6_frag_init(void)
 	if (ret)
 		goto err_pernet;

-	ip6_frags.hashfn = ip6_hashfn;
-	ip6_frags.constructor = ip6_frag_init;
-	ip6_frags.destructor = NULL;
-	ip6_frags.qsize = sizeof(struct frag_queue);
-	ip6_frags.match = ip6_frag_match;
-	ip6_frags.frag_expire = ip6_frag_expire;
-	ip6_frags.frags_cache_name = ip6_frag_cache_name;
-	ret = inet_frags_init(&ip6_frags);
-	if (ret)
-		goto err_pernet;
out:
 	return ret;

@@ -767,6 +770,8 @@ err_pernet:
 	ip6_frags_sysctl_unregister();
err_sysctl:
 	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+err_protocol:
+	inet_frags_fini(&ip6_frags);
 	goto out;
 }

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 8c8df75dbead..2a2ab6bfe5d8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -149,12 +149,6 @@ struct netem_skb_cb {
 	ktime_t tstamp_save;
 };

-
-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
-{
-	return rb_entry(rb, struct sk_buff, rbnode);
-}
-
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
 	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
 	struct rb_node *p;

 	while ((p = rb_first(&q->t_root))) {
-		struct sk_buff *skb = netem_rb_to_skb(p);
+		struct sk_buff *skb = rb_to_skb(p);

 		rb_erase(p, &q->t_root);
 		rtnl_kfree_skbs(skb, skb);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		struct sk_buff *skb;

 		parent = *p;
-		skb = netem_rb_to_skb(parent);
+		skb = rb_to_skb(parent);
 		if (tnext >= netem_skb_cb(skb)->time_to_send)
 			p = &parent->rb_right;
 		else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 				struct sk_buff *t_skb;
 				struct netem_skb_cb *t_last;

-				t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+				t_skb = skb_rb_last(&q->t_root);
 				t_last = netem_skb_cb(t_skb);
 				if (!last ||
 				    t_last->time_to_send > last->time_to_send) {
@@ -618,7 +612,7 @@ deliver:
 	if (p) {
 		psched_time_t time_to_send;

-		skb = netem_rb_to_skb(p);
+		skb = rb_to_skb(p);

 		/* if more time remaining? */
 		time_to_send = netem_skb_cb(skb)->time_to_send;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 417abbb1f72c..8a027973f2ad 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3923,7 +3923,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)

 	list_for_each_codec(codec, bus) {
 		/* FIXME: maybe a better way needed for forced reset */
-		cancel_delayed_work_sync(&codec->jackpoll_work);
+		if (current_work() != &codec->jackpoll_work.work)
+			cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
 		if (hda_codec_is_power_on(codec)) {
 			hda_call_codec_suspend(codec);
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 3479a1bc7caa..fb76423022e8 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2229,6 +2229,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
 	" s Togle full lenght of symbol and source line columns \n"
 	" q Return back to cacheline list \n";

+	if (!he)
+		return 0;
+
 	/* Display compact version first. */
 	c2c.symbol_full = false;

diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 55086389fc06..96f62dd7e3ed 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -24,7 +24,9 @@ static inline unsigned long long rdclock(void)
 	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }

+#ifndef MAX_NR_CPUS
 #define MAX_NR_CPUS 1024
+#endif

 extern const char *input_name;
 extern bool perf_host, perf_guest;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 226a9245d1db..2227ee92d8e2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -824,6 +824,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
 	}
 }

+static bool is_dummy_event(struct perf_evsel *evsel)
+{
+	return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
+	       (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -1054,6 +1060,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 		else
 			perf_evsel__reset_sample_bit(evsel, PERIOD);
 	}
+
+	/*
+	 * For initial_delay, a dummy event is added implicitly.
+	 * The software event will trigger -EOPNOTSUPP error out,
+	 * if BRANCH_STACK bit is set.
+	 */
+	if (opts->initial_delay && is_dummy_event(evsel))
+		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }

 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index b53596ad601b..2e7fd8227969 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	if (get_nfit_res(pmem->phys_addr + offset)) {
 		struct page *page;

-		*kaddr = pmem->virt_addr + offset;
+		if (kaddr)
+			*kaddr = pmem->virt_addr + offset;
 		page = vmalloc_to_page(pmem->virt_addr + offset);
-		*pfn = page_to_pfn_t(page);
+		if (pfn)
+			*pfn = page_to_pfn_t(page);
 		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
 				__func__, pmem, pgoff, page_to_pfn(page));

 		return 1;
 	}

-	*kaddr = pmem->virt_addr + offset;
-	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+	if (kaddr)
+		*kaddr = pmem->virt_addr + offset;
+	if (pfn)
+		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

 	/*
 	 * If badblocks are present, limit known good range to the
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 9167ee976314..041dbbb30ff0 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -5895,7 +5895,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_MOV64_REG(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_in_map = { 3 },
@@ -5918,7 +5918,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_MOV64_REG(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_in_map = { 3 },
@@ -5941,7 +5941,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_MOV64_REG(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_in_map = { 3 },