Mirror of https://github.com/Fishwaldo/build.git (synced 2025-07-12 07:58:47 +00:00)
* Change DEV to EDGE
* Renaming patches dev folder to edge
* Move patches into subdir where they will be archived
* Relink patch directories properly
2063 lines
67 KiB
diff --git a/Makefile b/Makefile
index b25ce26c1cd71..101b789e7c2ba 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 221
+SUBLEVEL = 222
EXTRAVERSION =
NAME = Petit Gorille

@@ -760,6 +760,13 @@ ifdef CONFIG_FUNCTION_TRACER
ifndef CC_FLAGS_FTRACE
CC_FLAGS_FTRACE := -pg
endif
+ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ # gcc 5 supports generating the mcount tables directly
+ ifeq ($(call cc-option-yn,-mrecord-mcount),y)
+ CC_FLAGS_FTRACE += -mrecord-mcount
+ export CC_USING_RECORD_MCOUNT := 1
+ endif
+endif
export CC_FLAGS_FTRACE
ifdef CONFIG_HAVE_FENTRY
CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index c5b119ddb70b8..7f2b73cbd2280 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -323,9 +323,6 @@

clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 02e6b6dfffa7e..19e4ff507209b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -667,18 +667,20 @@ struct page *get_signal_page(void)

addr = page_address(page);

+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;

- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);

return page;
}
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index e8e637c4f354d..32aa108b2b7cd 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -392,8 +392,6 @@ static int __init xen_guest_init(void)
return -ENOMEM;
}
gnttab_init();
- if (!xen_initial_domain())
- xenbus_probe();

/*
* Making sure board specific code will not set up ops for
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 0641ba54ab62a..ce538c51fa3fb 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -93,8 +93,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
for (i = 0; i < count; i++) {
if (map_ops[i].status)
continue;
- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
+ if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
+ return -ENOMEM;
+ }
}

return 0;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 82747048381fa..721f4b6b262f1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -231,6 +231,7 @@
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -249,7 +250,6 @@
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index 85e60509f0a83..d4b53af657c84 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -63,6 +63,9 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif

return 0;
}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3018582794efc..d16e6654a6555 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -574,7 +574,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
*/
}

-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
{
void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
u32 __maybe_unused cfg;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4c8e9f12b0c4d..9f33a69b56051 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -62,6 +62,9 @@ endif
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)

+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -138,9 +141,6 @@ else
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel

- # Intel CET isn't enabled in the kernel
- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-
# -funit-at-a-time shrinks the kernel .text considerably
# unfortunately it makes reading oopses harder.
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
|
|
index 15812e553b95e..30295d2ebd924 100644
|
|
--- a/arch/x86/xen/p2m.c
|
|
+++ b/arch/x86/xen/p2m.c
|
|
@@ -708,7 +708,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|
unsigned long mfn, pfn;
|
|
|
|
/* Do not add to override if the map failed. */
|
|
- if (map_ops[i].status)
|
|
+ if (map_ops[i].status != GNTST_okay ||
|
|
+ (kmap_ops && kmap_ops[i].status != GNTST_okay))
|
|
continue;
|
|
|
|
if (map_ops[i].flags & GNTMAP_contains_pte) {
|
|
@@ -746,17 +747,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
|
|
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
|
|
unsigned long pfn = page_to_pfn(pages[i]);
|
|
|
|
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
|
|
+ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
|
|
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
|
|
+ else
|
|
ret = -EINVAL;
|
|
- goto out;
|
|
- }
|
|
-
|
|
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
|
|
}
|
|
if (kunmap_ops)
|
|
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
|
|
- kunmap_ops, count);
|
|
-out:
|
|
+ kunmap_ops, count) ?: ret;
|
|
+
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
|
|
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
|
|
index 04ae2474e3344..a703f365b5b19 100644
|
|
--- a/drivers/block/xen-blkback/blkback.c
|
|
+++ b/drivers/block/xen-blkback/blkback.c
|
|
@@ -843,8 +843,11 @@ again:
|
|
pages[i]->page = persistent_gnt->page;
|
|
pages[i]->persistent_gnt = persistent_gnt;
|
|
} else {
|
|
- if (get_free_page(ring, &pages[i]->page))
|
|
- goto out_of_memory;
|
|
+ if (get_free_page(ring, &pages[i]->page)) {
|
|
+ put_free_pages(ring, pages_to_gnt, segs_to_map);
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
addr = vaddr(pages[i]->page);
|
|
pages_to_gnt[segs_to_map] = pages[i]->page;
|
|
pages[i]->persistent_gnt = NULL;
|
|
@@ -860,10 +863,8 @@ again:
|
|
break;
|
|
}
|
|
|
|
- if (segs_to_map) {
|
|
+ if (segs_to_map)
|
|
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
|
|
- BUG_ON(ret);
|
|
- }
|
|
|
|
/*
|
|
* Now swizzle the MFN in our domain with the MFN from the other domain
|
|
@@ -878,7 +879,7 @@ again:
|
|
pr_debug("invalid buffer -- could not remap it\n");
|
|
put_free_pages(ring, &pages[seg_idx]->page, 1);
|
|
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
|
|
- ret |= 1;
|
|
+ ret |= !ret;
|
|
goto next;
|
|
}
|
|
pages[seg_idx]->handle = map[new_map_idx].handle;
|
|
@@ -930,17 +931,18 @@ next:
|
|
}
|
|
segs_to_map = 0;
|
|
last_map = map_until;
|
|
- if (map_until != num)
|
|
+ if (!ret && map_until != num)
|
|
goto again;
|
|
|
|
- return ret;
|
|
-
|
|
-out_of_memory:
|
|
- pr_alert("%s: out of memory\n", __func__);
|
|
- put_free_pages(ring, pages_to_gnt, segs_to_map);
|
|
- for (i = last_map; i < num; i++)
|
|
+out:
|
|
+ for (i = last_map; i < num; i++) {
|
|
+ /* Don't zap current batch's valid persistent grants. */
|
|
+ if(i >= last_map + segs_to_map)
|
|
+ pages[i]->persistent_gnt = NULL;
|
|
pages[i]->handle = BLKBACK_INVALID_HANDLE;
|
|
- return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int xen_blkbk_map_seg(struct pending_req *pending_req)
|
|
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
|
|
index 14f60751729e7..9768921a164c0 100644
|
|
--- a/drivers/i2c/busses/i2c-stm32f7.c
|
|
+++ b/drivers/i2c/busses/i2c-stm32f7.c
|
|
@@ -42,6 +42,8 @@
|
|
|
|
/* STM32F7 I2C control 1 */
|
|
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
|
|
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
|
|
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
|
|
#define STM32F7_I2C_CR1_ERRIE BIT(7)
|
|
#define STM32F7_I2C_CR1_TCIE BIT(6)
|
|
#define STM32F7_I2C_CR1_STOPIE BIT(5)
|
|
@@ -95,7 +97,7 @@
|
|
#define STM32F7_I2C_MAX_LEN 0xff
|
|
|
|
#define STM32F7_I2C_DNF_DEFAULT 0
|
|
-#define STM32F7_I2C_DNF_MAX 16
|
|
+#define STM32F7_I2C_DNF_MAX 15
|
|
|
|
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
|
|
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
|
|
@@ -543,6 +545,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
|
|
else
|
|
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
|
|
STM32F7_I2C_CR1_ANFOFF);
|
|
+
|
|
+ /* Program the Digital Filter */
|
|
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
|
|
+ STM32F7_I2C_CR1_DNF_MASK);
|
|
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
|
|
+ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
|
|
+
|
|
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
|
|
STM32F7_I2C_CR1_PE);
|
|
}
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
|
|
index 71a01df96f8b0..6db51abb8f4a3 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
|
|
@@ -518,7 +518,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
|
|
const size_t bufsz = sizeof(buf);
|
|
int pos = 0;
|
|
|
|
+ mutex_lock(&mvm->mutex);
|
|
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
|
|
+ mutex_unlock(&mvm->mutex);
|
|
+
|
|
do_div(curr_os, NSEC_PER_USEC);
|
|
diff = curr_os - curr_gp2;
|
|
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
index 54f411b83beae..dc0bc57767390 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
@@ -1169,6 +1169,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
|
|
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
|
|
if (device_reprobe(reprobe->dev))
|
|
dev_err(reprobe->dev, "reprobe failed!\n");
|
|
+ put_device(reprobe->dev);
|
|
kfree(reprobe);
|
|
module_put(THIS_MODULE);
|
|
}
|
|
@@ -1219,7 +1220,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
|
module_put(THIS_MODULE);
|
|
return;
|
|
}
|
|
- reprobe->dev = mvm->trans->dev;
|
|
+ reprobe->dev = get_device(mvm->trans->dev);
|
|
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
|
|
schedule_work(&reprobe->work);
|
|
} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
|
|
index c3a2e6b6da65b..e1fb0258c9168 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
|
|
@@ -622,6 +622,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
|
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
|
struct iwl_txq *txq = trans_pcie->txq[txq_id];
|
|
|
|
+ if (!txq) {
|
|
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
spin_lock_bh(&txq->lock);
|
|
while (txq->write_ptr != txq->read_ptr) {
|
|
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
|
|
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
|
|
index b8100298017b9..fcaf4dd9d9c4c 100644
|
|
--- a/drivers/net/xen-netback/netback.c
|
|
+++ b/drivers/net/xen-netback/netback.c
|
|
@@ -1328,13 +1328,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
|
|
return 0;
|
|
|
|
gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
|
|
- if (nr_mops != 0) {
|
|
+ if (nr_mops != 0)
|
|
ret = gnttab_map_refs(queue->tx_map_ops,
|
|
NULL,
|
|
queue->pages_to_map,
|
|
nr_mops);
|
|
- BUG_ON(ret);
|
|
- }
|
|
|
|
work_done = xenvif_tx_submit(queue);
|
|
|
|
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
|
|
index f152246c7dfb7..ddfb1cfa2dd94 100644
|
|
--- a/drivers/net/xen-netback/rx.c
|
|
+++ b/drivers/net/xen-netback/rx.c
|
|
@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
|
|
RING_IDX prod, cons;
|
|
struct sk_buff *skb;
|
|
int needed;
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
|
|
|
|
skb = skb_peek(&queue->rx_queue);
|
|
- if (!skb)
|
|
+ if (!skb) {
|
|
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
|
|
return false;
|
|
+ }
|
|
|
|
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
|
|
if (skb_is_gso(skb))
|
|
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
|
|
if (skb->sw_hash)
|
|
needed++;
|
|
|
|
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
|
|
+
|
|
do {
|
|
prod = queue->rx.sring->req_prod;
|
|
cons = queue->rx.req_cons;
|
|
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
|
|
index 952544ca0d84d..93fadd4abf14d 100644
|
|
--- a/drivers/platform/x86/hp-wmi.c
|
|
+++ b/drivers/platform/x86/hp-wmi.c
|
|
@@ -45,6 +45,10 @@ MODULE_LICENSE("GPL");
|
|
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
|
|
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
|
|
|
|
+static int enable_tablet_mode_sw = -1;
|
|
+module_param(enable_tablet_mode_sw, int, 0444);
|
|
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
|
|
+
|
|
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
|
|
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
|
|
|
|
@@ -656,10 +660,12 @@ static int __init hp_wmi_input_setup(void)
|
|
}
|
|
|
|
/* Tablet mode */
|
|
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
|
|
- if (!(val < 0)) {
|
|
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
|
|
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
|
|
+ if (enable_tablet_mode_sw > 0) {
|
|
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
|
|
+ if (val >= 0) {
|
|
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
|
|
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
|
|
+ }
|
|
}
|
|
|
|
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
|
|
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
|
|
index 81ec9b6805fcd..965f85a49ba0b 100644
|
|
--- a/drivers/remoteproc/qcom_q6v5_pil.c
|
|
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
|
|
@@ -293,6 +293,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
|
|
{
|
|
struct q6v5 *qproc = rproc->priv;
|
|
|
|
+ /* MBA is restricted to a maximum size of 1M */
|
|
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
|
|
+ dev_err(qproc->dev, "MBA firmware load failed\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
memcpy(qproc->mba_region, fw->data, fw->size);
|
|
|
|
return 0;
|
|
@@ -560,14 +566,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
|
|
|
|
if (phdr->p_filesz) {
|
|
snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
|
|
- ret = request_firmware(&seg_fw, seg_name, qproc->dev);
|
|
+ ret = request_firmware_into_buf(&seg_fw, seg_name, qproc->dev,
|
|
+ ptr, phdr->p_filesz);
|
|
if (ret) {
|
|
dev_err(qproc->dev, "failed to load %s\n", seg_name);
|
|
goto release_firmware;
|
|
}
|
|
|
|
- memcpy(ptr, seg_fw->data, seg_fw->size);
|
|
-
|
|
release_firmware(seg_fw);
|
|
}
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
|
|
index 733e8dcccf5c3..0b50871957a6d 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
|
|
@@ -897,7 +897,8 @@ qla27xx_template_checksum(void *p, ulong size)
|
|
static inline int
|
|
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
|
|
{
|
|
- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
|
|
+ return qla27xx_template_checksum(tmp,
|
|
+ le32_to_cpu(tmp->template_size)) == 0;
|
|
}
|
|
|
|
static inline int
|
|
@@ -913,7 +914,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
|
|
ulong len;
|
|
|
|
if (qla27xx_fwdt_template_valid(tmp)) {
|
|
- len = tmp->template_size;
|
|
+ len = le32_to_cpu(tmp->template_size);
|
|
tmp = memcpy(vha->hw->fw_dump, tmp, len);
|
|
ql27xx_edit_template(vha, tmp);
|
|
qla27xx_walk_template(vha, tmp, tmp, &len);
|
|
@@ -929,7 +930,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
|
|
ulong len = 0;
|
|
|
|
if (qla27xx_fwdt_template_valid(tmp)) {
|
|
- len = tmp->template_size;
|
|
+ len = le32_to_cpu(tmp->template_size);
|
|
qla27xx_walk_template(vha, tmp, NULL, &len);
|
|
}
|
|
|
|
@@ -941,7 +942,7 @@ qla27xx_fwdt_template_size(void *p)
|
|
{
|
|
struct qla27xx_fwdt_template *tmp = p;
|
|
|
|
- return tmp->template_size;
|
|
+ return le32_to_cpu(tmp->template_size);
|
|
}
|
|
|
|
ulong
|
|
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
|
|
index 141c1c5e73f42..2d3e1a8349b3b 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
|
|
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
|
|
@@ -13,7 +13,7 @@
|
|
struct __packed qla27xx_fwdt_template {
|
|
uint32_t template_type;
|
|
uint32_t entry_offset;
|
|
- uint32_t template_size;
|
|
+ __le32 template_size;
|
|
uint32_t reserved_1;
|
|
|
|
uint32_t entry_count;
|
|
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
|
|
index d3b68e97096e7..bc2dd9499ea03 100644
|
|
--- a/drivers/usb/dwc3/ulpi.c
|
|
+++ b/drivers/usb/dwc3/ulpi.c
|
|
@@ -10,6 +10,8 @@
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
+#include <linux/delay.h>
|
|
+#include <linux/time64.h>
|
|
#include <linux/ulpi/regs.h>
|
|
|
|
#include "core.h"
|
|
@@ -20,12 +22,22 @@
|
|
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
|
|
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
|
|
|
|
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
|
|
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
|
|
+
|
|
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
|
|
{
|
|
- unsigned count = 1000;
|
|
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
|
|
+ unsigned int count = 1000;
|
|
u32 reg;
|
|
|
|
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
|
|
+ ns += DWC3_ULPI_BASE_DELAY;
|
|
+
|
|
+ if (read)
|
|
+ ns += DWC3_ULPI_BASE_DELAY;
|
|
+
|
|
while (count--) {
|
|
+ ndelay(ns);
|
|
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
|
|
if (reg & DWC3_GUSB2PHYACC_DONE)
|
|
return 0;
|
|
@@ -50,7 +62,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
|
|
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
|
|
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
|
|
|
|
- ret = dwc3_ulpi_busyloop(dwc);
|
|
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -74,7 +86,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
|
|
reg |= DWC3_GUSB2PHYACC_WRITE | val;
|
|
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
|
|
|
|
- return dwc3_ulpi_busyloop(dwc);
|
|
+ return dwc3_ulpi_busyloop(dwc, addr, false);
|
|
}
|
|
|
|
static const struct ulpi_ops dwc3_ulpi_ops = {
|
|
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
|
|
index 716edd593a994..989682cc86868 100644
|
|
--- a/drivers/usb/gadget/function/u_ether.c
|
|
+++ b/drivers/usb/gadget/function/u_ether.c
|
|
@@ -49,9 +49,10 @@
|
|
#define UETH__VERSION "29-May-2008"
|
|
|
|
/* Experiments show that both Linux and Windows hosts allow up to 16k
|
|
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
|
|
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
|
|
* blocks and still have efficient handling. */
|
|
-#define GETHER_MAX_ETH_FRAME_LEN 15412
|
|
+#define GETHER_MAX_MTU_SIZE 15412
|
|
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
|
|
|
|
struct eth_dev {
|
|
/* lock is held while accessing port_usb
|
|
@@ -790,7 +791,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
|
|
|
|
/* MTU range: 14 - 15412 */
|
|
net->min_mtu = ETH_HLEN;
|
|
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
|
|
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
|
|
|
|
dev->gadget = g;
|
|
SET_NETDEV_DEV(net, &g->dev);
|
|
@@ -850,6 +851,10 @@ struct net_device *gether_setup_name_default(const char *netname)
|
|
net->ethtool_ops = &ops;
|
|
SET_NETDEV_DEVTYPE(net, &gadget_type);
|
|
|
|
+ /* MTU range: 14 - 15412 */
|
|
+ net->min_mtu = ETH_HLEN;
|
|
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
|
|
+
|
|
return net;
|
|
}
|
|
EXPORT_SYMBOL_GPL(gether_setup_name_default);
|
|
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
|
|
index bd56653b9bbc2..7b4ac5505f532 100644
|
|
--- a/drivers/xen/gntdev.c
|
|
+++ b/drivers/xen/gntdev.c
|
|
@@ -295,36 +295,47 @@ static int map_grant_pages(struct grant_map *map)
|
|
* to the kernel linear addresses of the struct pages.
|
|
* These ptes are completely different from the user ptes dealt
|
|
* with find_grant_ptes.
|
|
+ * Note that GNTMAP_device_map isn't needed here: The
|
|
+ * dev_bus_addr output field gets consumed only from ->map_ops,
|
|
+ * and by not requesting it when mapping we also avoid needing
|
|
+ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
|
|
+ * reference to the page in the hypervisor).
|
|
*/
|
|
+ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
|
|
+ GNTMAP_host_map;
|
|
+
|
|
for (i = 0; i < map->count; i++) {
|
|
unsigned long address = (unsigned long)
|
|
pfn_to_kaddr(page_to_pfn(map->pages[i]));
|
|
BUG_ON(PageHighMem(map->pages[i]));
|
|
|
|
- gnttab_set_map_op(&map->kmap_ops[i], address,
|
|
- map->flags | GNTMAP_host_map,
|
|
+ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
|
|
map->grants[i].ref,
|
|
map->grants[i].domid);
|
|
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
|
|
- map->flags | GNTMAP_host_map, -1);
|
|
+ flags, -1);
|
|
}
|
|
}
|
|
|
|
pr_debug("map %d+%d\n", map->index, map->count);
|
|
err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
|
|
map->pages, map->count);
|
|
- if (err)
|
|
- return err;
|
|
|
|
for (i = 0; i < map->count; i++) {
|
|
- if (map->map_ops[i].status) {
|
|
+ if (map->map_ops[i].status == GNTST_okay)
|
|
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
|
|
+ else if (!err)
|
|
err = -EINVAL;
|
|
- continue;
|
|
- }
|
|
|
|
- map->unmap_ops[i].handle = map->map_ops[i].handle;
|
|
- if (use_ptemod)
|
|
- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
|
|
+ if (map->flags & GNTMAP_device_map)
|
|
+ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
|
|
+
|
|
+ if (use_ptemod) {
|
|
+ if (map->kmap_ops[i].status == GNTST_okay)
|
|
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
|
|
+ else if (!err)
|
|
+ err = -EINVAL;
|
|
+ }
|
|
}
|
|
return err;
|
|
}
|
|
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
|
|
index fd32c3459b668..6d5eaea3373ba 100644
|
|
--- a/drivers/xen/xen-scsiback.c
|
|
+++ b/drivers/xen/xen-scsiback.c
|
|
@@ -422,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
|
|
return 0;
|
|
|
|
err = gnttab_map_refs(map, NULL, pg, cnt);
|
|
- BUG_ON(err);
|
|
for (i = 0; i < cnt; i++) {
|
|
if (unlikely(map[i].status != GNTST_okay)) {
|
|
pr_err("invalid buffer -- could not remap it\n");
|
|
map[i].handle = SCSIBACK_INVALID_HANDLE;
|
|
- err = -ENOMEM;
|
|
+ if (!err)
|
|
+ err = -ENOMEM;
|
|
} else {
|
|
get_page(pg[i]);
|
|
}
|
|
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
|
|
index e6a8d02d35254..139539b0ab20d 100644
|
|
--- a/drivers/xen/xenbus/xenbus.h
|
|
+++ b/drivers/xen/xenbus/xenbus.h
|
|
@@ -114,7 +114,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
|
|
const char *type,
|
|
const char *nodename);
|
|
int xenbus_probe_devices(struct xen_bus_type *bus);
|
|
-void xenbus_probe(void);
|
|
|
|
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
|
|
|
|
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
|
|
index 9cac938361a01..08f1ccdbe343f 100644
|
|
--- a/drivers/xen/xenbus/xenbus_probe.c
|
|
+++ b/drivers/xen/xenbus/xenbus_probe.c
|
|
@@ -674,7 +674,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
|
|
}
|
|
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
|
|
|
|
-void xenbus_probe(void)
|
|
+static void xenbus_probe(void)
|
|
{
|
|
xenstored_ready = 1;
|
|
|
|
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
|
|
index 384f95e1936dd..fde277be26420 100644
|
|
--- a/fs/fs-writeback.c
|
|
+++ b/fs/fs-writeback.c
|
|
@@ -1965,7 +1965,7 @@ void wb_workfn(struct work_struct *work)
|
|
struct bdi_writeback, dwork);
|
|
long pages_written;
|
|
|
|
- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
|
|
+ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
|
|
current->flags |= PF_SWAPWRITE;
|
|
|
|
if (likely(!current_is_workqueue_rescuer() ||
|
|
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
|
|
index 8e2e3d3b7b253..0737f193fc532 100644
|
|
--- a/fs/nfs/pnfs.c
|
|
+++ b/fs/nfs/pnfs.c
|
|
@@ -1973,7 +1973,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
|
|
* We got an entirely new state ID. Mark all segments for the
|
|
* inode invalid, and retry the layoutget
|
|
*/
|
|
- pnfs_mark_layout_stateid_invalid(lo, &free_me);
|
|
+ struct pnfs_layout_range range = {
|
|
+ .iomode = IOMODE_ANY,
|
|
+ .length = NFS4_MAX_UINT64,
|
|
+ };
|
|
+ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
|
|
+ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
|
|
+ &range, 0);
|
|
goto out_forget;
|
|
}
|
|
|
|
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
|
|
index b97fc1df62128..f3ed80e2966c3 100644
|
|
--- a/fs/overlayfs/copy_up.c
|
|
+++ b/fs/overlayfs/copy_up.c
|
|
@@ -95,6 +95,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
|
|
|
|
if (ovl_is_private_xattr(name))
|
|
continue;
|
|
+
|
|
+ error = security_inode_copy_up_xattr(name);
|
|
+ if (error < 0 && error != -EOPNOTSUPP)
|
|
+ break;
|
|
+ if (error == 1) {
|
|
+ error = 0;
|
|
+ continue; /* Discard */
|
|
+ }
|
|
retry:
|
|
size = vfs_getxattr(old, name, value, value_size);
|
|
if (size == -ERANGE)
|
|
@@ -118,13 +126,6 @@ retry:
|
|
goto retry;
|
|
}
|
|
|
|
- error = security_inode_copy_up_xattr(name);
|
|
- if (error < 0 && error != -EOPNOTSUPP)
|
|
- break;
|
|
- if (error == 1) {
|
|
- error = 0;
|
|
- continue; /* Discard */
|
|
- }
|
|
error = vfs_setxattr(new, name, value, size, 0);
|
|
if (error)
|
|
break;
|
|
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
|
|
index 30a1c7fc8c75c..ac6efac119fb9 100644
|
|
--- a/fs/overlayfs/inode.c
|
|
+++ b/fs/overlayfs/inode.c
|
|
@@ -216,7 +216,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
|
|
goto out;
|
|
|
|
if (!value && !upperdentry) {
|
|
+ old_cred = ovl_override_creds(dentry->d_sb);
|
|
err = vfs_getxattr(realdentry, name, NULL, 0);
|
|
+ revert_creds(old_cred);
|
|
if (err < 0)
|
|
goto out_drop_write;
|
|
}
|
|
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
|
|
index 8073b6532cf04..d2a806416c3ab 100644
|
|
--- a/fs/squashfs/export.c
|
|
+++ b/fs/squashfs/export.c
|
|
@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
|
|
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
|
int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
|
|
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
|
|
- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
|
|
+ u64 start;
|
|
__le64 ino;
|
|
int err;
|
|
|
|
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
|
|
|
|
+ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
|
|
+ return -EINVAL;
|
|
+
|
|
+ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
|
|
+
|
|
err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
|
|
if (err < 0)
|
|
return err;
|
|
@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
|
|
u64 lookup_table_start, u64 next_table, unsigned int inodes)
|
|
{
|
|
unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
|
|
+ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
|
|
+ int n;
|
|
__le64 *table;
|
|
+ u64 start, end;
|
|
|
|
TRACE("In read_inode_lookup_table, length %d\n", length);
|
|
|
|
@@ -134,20 +142,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
|
|
if (inodes == 0)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- /* length bytes should not extend into the next table - this check
|
|
- * also traps instances where lookup_table_start is incorrectly larger
|
|
- * than the next table start
|
|
+ /*
|
|
+ * The computed size of the lookup table (length bytes) should exactly
|
|
+ * match the table start and end points
|
|
*/
|
|
- if (lookup_table_start + length > next_table)
|
|
+ if (length != (next_table - lookup_table_start))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
table = squashfs_read_table(sb, lookup_table_start, length);
|
|
+ if (IS_ERR(table))
|
|
+ return table;
|
|
|
|
/*
|
|
- * table[0] points to the first inode lookup table metadata block,
|
|
- * this should be less than lookup_table_start
|
|
+ * table0], table[1], ... table[indexes - 1] store the locations
|
|
+ * of the compressed inode lookup blocks. Each entry should be
|
|
+ * less than the next (i.e. table[0] < table[1]), and the difference
|
|
+ * between them should be SQUASHFS_METADATA_SIZE or less.
|
|
+ * table[indexes - 1] should be less than lookup_table_start, and
|
|
+ * again the difference should be SQUASHFS_METADATA_SIZE or less
|
|
*/
|
|
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
|
|
+ for (n = 0; n < (indexes - 1); n++) {
|
|
+ start = le64_to_cpu(table[n]);
|
|
+ end = le64_to_cpu(table[n + 1]);
|
|
+
|
|
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
|
+ kfree(table);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ start = le64_to_cpu(table[indexes - 1]);
|
|
+ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
|
|
kfree(table);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
|
|
index d38ea3dab9515..8ccc0e3f6ea5a 100644
|
|
--- a/fs/squashfs/id.c
|
|
+++ b/fs/squashfs/id.c
|
|
@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
|
|
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
|
int block = SQUASHFS_ID_BLOCK(index);
|
|
int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
|
|
- u64 start_block = le64_to_cpu(msblk->id_table[block]);
|
|
+ u64 start_block;
|
|
__le32 disk_id;
|
|
int err;
|
|
|
|
+ if (index >= msblk->ids)
|
|
+ return -EINVAL;
|
|
+
|
|
+ start_block = le64_to_cpu(msblk->id_table[block]);
|
|
+
|
|
err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
|
|
sizeof(disk_id));
|
|
if (err < 0)
|
|
@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
|
|
u64 id_table_start, u64 next_table, unsigned short no_ids)
|
|
{
|
|
unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
|
|
+ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
|
|
+ int n;
|
|
__le64 *table;
|
|
+ u64 start, end;
|
|
|
|
TRACE("In read_id_index_table, length %d\n", length);
|
|
|
|
@@ -80,20 +88,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
/*
|
|
- * length bytes should not extend into the next table - this check
|
|
- * also traps instances where id_table_start is incorrectly larger
|
|
- * than the next table start
|
|
+ * The computed size of the index table (length bytes) should exactly
|
|
+ * match the table start and end points
|
|
*/
|
|
- if (id_table_start + length > next_table)
|
|
+ if (length != (next_table - id_table_start))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
table = squashfs_read_table(sb, id_table_start, length);
|
|
+ if (IS_ERR(table))
|
|
+ return table;
|
|
|
|
/*
|
|
- * table[0] points to the first id lookup table metadata block, this
|
|
- * should be less than id_table_start
|
|
+ * table[0], table[1], ... table[indexes - 1] store the locations
|
|
+ * of the compressed id blocks. Each entry should be less than
|
|
+ * the next (i.e. table[0] < table[1]), and the difference between them
|
|
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
|
|
+ * should be less than id_table_start, and again the difference
|
|
+ * should be SQUASHFS_METADATA_SIZE or less
|
|
*/
|
|
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
|
|
+ for (n = 0; n < (indexes - 1); n++) {
|
|
+ start = le64_to_cpu(table[n]);
|
|
+ end = le64_to_cpu(table[n + 1]);
|
|
+
|
|
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
|
+ kfree(table);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ start = le64_to_cpu(table[indexes - 1]);
|
|
+ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
|
|
kfree(table);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
|
|
index ef69c31947bf8..5234c19a0eabc 100644
|
|
--- a/fs/squashfs/squashfs_fs_sb.h
|
|
+++ b/fs/squashfs/squashfs_fs_sb.h
|
|
@@ -77,5 +77,6 @@ struct squashfs_sb_info {
|
|
unsigned int inodes;
|
|
unsigned int fragments;
|
|
int xattr_ids;
|
|
+ unsigned int ids;
|
|
};
|
|
#endif
|
|
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
|
|
index 1516bb779b8d4..5abc9d03397c1 100644
|
|
--- a/fs/squashfs/super.c
|
|
+++ b/fs/squashfs/super.c
|
|
@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
|
|
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
|
|
msblk->inodes = le32_to_cpu(sblk->inodes);
|
|
msblk->fragments = le32_to_cpu(sblk->fragments);
|
|
+ msblk->ids = le16_to_cpu(sblk->no_ids);
|
|
flags = le16_to_cpu(sblk->flags);
|
|
|
|
TRACE("Found valid superblock on %pg\n", sb->s_bdev);
|
|
@@ -187,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
|
|
TRACE("Block size %d\n", msblk->block_size);
|
|
TRACE("Number of inodes %d\n", msblk->inodes);
|
|
TRACE("Number of fragments %d\n", msblk->fragments);
|
|
- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
|
|
+ TRACE("Number of ids %d\n", msblk->ids);
|
|
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
|
|
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
|
|
TRACE("sblk->fragment_table_start %llx\n",
|
|
@@ -244,8 +245,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
|
|
allocate_id_index_table:
|
|
/* Allocate and read id index table */
|
|
msblk->id_table = squashfs_read_id_index_table(sb,
|
|
- le64_to_cpu(sblk->id_table_start), next_table,
|
|
- le16_to_cpu(sblk->no_ids));
|
|
+ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
|
|
if (IS_ERR(msblk->id_table)) {
|
|
ERROR("unable to read id index table\n");
|
|
err = PTR_ERR(msblk->id_table);
|
|
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
|
|
index afe70f815e3de..86b0a0073e51f 100644
|
|
--- a/fs/squashfs/xattr.h
|
|
+++ b/fs/squashfs/xattr.h
|
|
@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
|
|
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
|
|
u64 start, u64 *xattr_table_start, int *xattr_ids)
|
|
{
|
|
+ struct squashfs_xattr_id_table *id_table;
|
|
+
|
|
+ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
|
|
+ if (IS_ERR(id_table))
|
|
+ return (__le64 *) id_table;
|
|
+
|
|
+ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
|
|
+ kfree(id_table);
|
|
+
|
|
ERROR("Xattrs in filesystem, these will be ignored\n");
|
|
- *xattr_table_start = start;
|
|
return ERR_PTR(-ENOTSUPP);
|
|
}
|
|
|
|
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
|
|
index c89607d690c48..3a655d879600c 100644
|
|
--- a/fs/squashfs/xattr_id.c
|
|
+++ b/fs/squashfs/xattr_id.c
|
|
@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
|
|
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
|
int block = SQUASHFS_XATTR_BLOCK(index);
|
|
int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
|
|
- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
|
|
+ u64 start_block;
|
|
struct squashfs_xattr_id id;
|
|
int err;
|
|
|
|
+ if (index >= msblk->xattr_ids)
|
|
+ return -EINVAL;
|
|
+
|
|
+ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
|
|
+
|
|
err = squashfs_read_metadata(sb, &id, &start_block, &offset,
|
|
sizeof(id));
|
|
if (err < 0)
|
|
@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
|
|
/*
|
|
* Read uncompressed xattr id lookup table indexes from disk into memory
|
|
*/
|
|
-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
|
|
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
|
|
u64 *xattr_table_start, int *xattr_ids)
|
|
{
|
|
- unsigned int len;
|
|
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
|
|
+ unsigned int len, indexes;
|
|
struct squashfs_xattr_id_table *id_table;
|
|
+ __le64 *table;
|
|
+ u64 start, end;
|
|
+ int n;
|
|
|
|
- id_table = squashfs_read_table(sb, start, sizeof(*id_table));
|
|
+ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
|
|
if (IS_ERR(id_table))
|
|
return (__le64 *) id_table;
|
|
|
|
@@ -83,13 +92,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
|
|
if (*xattr_ids == 0)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- /* xattr_table should be less than start */
|
|
- if (*xattr_table_start >= start)
|
|
+ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
|
|
+ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
|
|
+
|
|
+ /*
|
|
+ * The computed size of the index table (len bytes) should exactly
|
|
+ * match the table start and end points
|
|
+ */
|
|
+ start = table_start + sizeof(*id_table);
|
|
+ end = msblk->bytes_used;
|
|
+
|
|
+ if (len != (end - start))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
|
|
+ table = squashfs_read_table(sb, start, len);
|
|
+ if (IS_ERR(table))
|
|
+ return table;
|
|
+
|
|
+ /* table[0], table[1], ... table[indexes - 1] store the locations
|
|
+ * of the compressed xattr id blocks. Each entry should be less than
|
|
+ * the next (i.e. table[0] < table[1]), and the difference between them
|
|
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
|
|
+ * should be less than table_start, and again the difference
|
|
+ * shouls be SQUASHFS_METADATA_SIZE or less.
|
|
+ *
|
|
+ * Finally xattr_table_start should be less than table[0].
|
|
+ */
|
|
+ for (n = 0; n < (indexes - 1); n++) {
|
|
+ start = le64_to_cpu(table[n]);
|
|
+ end = le64_to_cpu(table[n + 1]);
|
|
+
|
|
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
|
+ kfree(table);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ start = le64_to_cpu(table[indexes - 1]);
|
|
+ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
|
|
+ kfree(table);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
|
|
- TRACE("In read_xattr_index_table, length %d\n", len);
|
|
+ if (*xattr_table_start >= le64_to_cpu(table[0])) {
|
|
+ kfree(table);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
|
|
- return squashfs_read_table(sb, start + sizeof(*id_table), len);
|
|
+ return table;
|
|
}
|
|
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
|
|
index 012adec975433..c947b29380547 100644
|
|
--- a/include/linux/backing-dev.h
|
|
+++ b/include/linux/backing-dev.h
|
|
@@ -13,6 +13,7 @@
|
|
#include <linux/fs.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/blkdev.h>
|
|
+#include <linux/device.h>
|
|
#include <linux/writeback.h>
|
|
#include <linux/blk-cgroup.h>
|
|
#include <linux/backing-dev-defs.h>
|
|
@@ -493,4 +494,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
|
|
(1 << WB_async_congested));
|
|
}
|
|
|
|
+extern const char *bdi_unknown_name;
|
|
+
|
|
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
|
|
+{
|
|
+ if (!bdi || !bdi->dev)
|
|
+ return bdi_unknown_name;
|
|
+ return dev_name(bdi->dev);
|
|
+}
|
|
+
|
|
#endif /* _LINUX_BACKING_DEV_H */
|
|
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
|
|
index e54d257983f28..2c9f2ddd62f92 100644
|
|
--- a/include/linux/ftrace.h
|
|
+++ b/include/linux/ftrace.h
|
|
@@ -792,7 +792,9 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
/* for init task */
|
|
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
|
|
+#define INIT_FTRACE_GRAPH \
|
|
+ .ret_stack = NULL, \
|
|
+ .tracing_graph_pause = ATOMIC_INIT(0),
|
|
|
|
/*
|
|
* Stack of return addresses for functions
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 3512c337a4a6b..80579577a7005 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -3674,6 +3674,7 @@ static inline void netif_tx_disable(struct net_device *dev)
|
|
|
|
local_bh_disable();
|
|
cpu = smp_processor_id();
|
|
+ spin_lock(&dev->tx_global_lock);
|
|
for (i = 0; i < dev->num_tx_queues; i++) {
|
|
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
|
|
|
|
@@ -3681,6 +3682,7 @@ static inline void netif_tx_disable(struct net_device *dev)
|
|
netif_tx_stop_queue(txq);
|
|
__netif_tx_unlock(txq);
|
|
}
|
|
+ spin_unlock(&dev->tx_global_lock);
|
|
local_bh_enable();
|
|
}
|
|
|
|
diff --git a/include/linux/string.h b/include/linux/string.h
|
|
index 315fef3aff4e6..3b5d01e80962a 100644
|
|
--- a/include/linux/string.h
|
|
+++ b/include/linux/string.h
|
|
@@ -30,6 +30,10 @@ size_t strlcpy(char *, const char *, size_t);
|
|
#ifndef __HAVE_ARCH_STRSCPY
|
|
ssize_t strscpy(char *, const char *, size_t);
|
|
#endif
|
|
+
|
|
+/* Wraps calls to strscpy()/memset(), no arch specific code required */
|
|
+ssize_t strscpy_pad(char *dest, const char *src, size_t count);
|
|
+
|
|
#ifndef __HAVE_ARCH_STRCAT
|
|
extern char * strcat(char *, const char *);
|
|
#endif
|
|
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
|
|
index d950223c64b1c..819f63e0edc15 100644
|
|
--- a/include/linux/sunrpc/xdr.h
|
|
+++ b/include/linux/sunrpc/xdr.h
|
|
@@ -26,8 +26,7 @@ struct rpc_rqst;
|
|
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
|
|
|
|
/*
|
|
- * Generic opaque `network object.' At the kernel level, this type
|
|
- * is used only by lockd.
|
|
+ * Generic opaque `network object.'
|
|
*/
|
|
#define XDR_MAX_NETOBJ 1024
|
|
struct xdr_netobj {
|
|
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
|
|
index 627f5759b67d1..cb2a5016247af 100644
|
|
--- a/include/trace/events/writeback.h
|
|
+++ b/include/trace/events/writeback.h
|
|
@@ -65,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name,
|
|
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
|
|
+ strscpy_pad(__entry->name,
|
|
+ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
|
|
+ NULL), 32);
|
|
__entry->ino = mapping ? mapping->host->i_ino : 0;
|
|
__entry->index = page->index;
|
|
),
|
|
@@ -95,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
|
|
struct backing_dev_info *bdi = inode_to_bdi(inode);
|
|
|
|
/* may be called for files on pseudo FSes w/ unregistered bdi */
|
|
- strncpy(__entry->name,
|
|
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
|
|
__entry->ino = inode->i_ino;
|
|
__entry->state = inode->i_state;
|
|
__entry->flags = flags;
|
|
@@ -175,8 +175,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name,
|
|
- dev_name(inode_to_bdi(inode)->dev), 32);
|
|
+ strscpy_pad(__entry->name,
|
|
+ bdi_dev_name(inode_to_bdi(inode)), 32);
|
|
__entry->ino = inode->i_ino;
|
|
__entry->sync_mode = wbc->sync_mode;
|
|
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
|
|
@@ -219,8 +219,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
|
|
__field(unsigned int, cgroup_ino)
|
|
),
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name,
|
|
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
|
|
__entry->nr_pages = work->nr_pages;
|
|
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
|
|
__entry->sync_mode = work->sync_mode;
|
|
@@ -273,7 +272,7 @@ DECLARE_EVENT_CLASS(writeback_class,
|
|
__field(unsigned int, cgroup_ino)
|
|
),
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
|
|
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
|
|
),
|
|
TP_printk("bdi %s: cgroup_ino=%u",
|
|
@@ -296,7 +295,7 @@ TRACE_EVENT(writeback_bdi_register,
|
|
__array(char, name, 32)
|
|
),
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name, dev_name(bdi->dev), 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
|
|
),
|
|
TP_printk("bdi %s",
|
|
__entry->name
|
|
@@ -321,7 +320,7 @@ DECLARE_EVENT_CLASS(wbc_class,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name, dev_name(bdi->dev), 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
|
|
__entry->nr_to_write = wbc->nr_to_write;
|
|
__entry->pages_skipped = wbc->pages_skipped;
|
|
__entry->sync_mode = wbc->sync_mode;
|
|
@@ -372,7 +371,7 @@ TRACE_EVENT(writeback_queue_io,
|
|
__field(unsigned int, cgroup_ino)
|
|
),
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
|
|
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
|
|
__entry->older = dirtied_before;
|
|
__entry->age = (jiffies - dirtied_before) * 1000 / HZ;
|
|
__entry->moved = moved;
|
|
@@ -457,7 +456,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
|
|
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
|
|
__entry->write_bw = KBps(wb->write_bandwidth);
|
|
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
|
|
__entry->dirty_rate = KBps(dirty_rate);
|
|
@@ -522,7 +521,7 @@ TRACE_EVENT(balance_dirty_pages,
|
|
|
|
TP_fast_assign(
|
|
unsigned long freerun = (thresh + bg_thresh) / 2;
|
|
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
|
|
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
|
|
|
|
__entry->limit = global_wb_domain.dirty_limit;
|
|
__entry->setpoint = (global_wb_domain.dirty_limit +
|
|
@@ -582,8 +581,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name,
|
|
- dev_name(inode_to_bdi(inode)->dev), 32);
|
|
+ strscpy_pad(__entry->name,
|
|
+ bdi_dev_name(inode_to_bdi(inode)), 32);
|
|
__entry->ino = inode->i_ino;
|
|
__entry->state = inode->i_state;
|
|
__entry->dirtied_when = inode->dirtied_when;
|
|
@@ -656,8 +655,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- strncpy(__entry->name,
|
|
- dev_name(inode_to_bdi(inode)->dev), 32);
|
|
+ strscpy_pad(__entry->name,
|
|
+ bdi_dev_name(inode_to_bdi(inode)), 32);
|
|
__entry->ino = inode->i_ino;
|
|
__entry->state = inode->i_state;
|
|
__entry->dirtied_when = inode->dirtied_when;
|
|
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
|
|
index 34b1379f9777d..f9d8aac170fbc 100644
|
|
--- a/include/xen/grant_table.h
|
|
+++ b/include/xen/grant_table.h
|
|
@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
|
|
map->flags = flags;
|
|
map->ref = ref;
|
|
map->dom = domid;
|
|
+ map->status = 1; /* arbitrary positive value */
|
|
}
|
|
|
|
static inline void
|
|
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
|
|
index fe9a9fa2ebc45..14d47ed4114fd 100644
|
|
--- a/include/xen/xenbus.h
|
|
+++ b/include/xen/xenbus.h
|
|
@@ -187,8 +187,6 @@ void xs_suspend_cancel(void);
|
|
|
|
struct work_struct;
|
|
|
|
-void xenbus_probe(void);
|
|
-
|
|
#define XENBUS_IS_ERR_READ(str) ({ \
|
|
if (!IS_ERR(str) && strlen(str) == 0) { \
|
|
kfree(str); \
|
|
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
|
|
index 135be433e9a0f..1d4c3fba0f8cd 100644
|
|
--- a/kernel/bpf/stackmap.c
|
|
+++ b/kernel/bpf/stackmap.c
|
|
@@ -71,6 +71,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
|
|
|
|
/* hash table size must be power of 2 */
|
|
n_buckets = roundup_pow_of_two(attr->max_entries);
|
|
+ if (!n_buckets)
|
|
+ return ERR_PTR(-E2BIG);
|
|
|
|
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
|
|
if (cost >= U32_MAX - PAGE_SIZE)
|
|
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
|
|
index 5b200b8797654..0373d050ff0c4 100644
|
|
--- a/kernel/trace/ftrace.c
|
|
+++ b/kernel/trace/ftrace.c
|
|
@@ -6666,7 +6666,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
|
|
}
|
|
|
|
if (t->ret_stack == NULL) {
|
|
- atomic_set(&t->tracing_graph_pause, 0);
|
|
atomic_set(&t->trace_overrun, 0);
|
|
t->curr_ret_stack = -1;
|
|
/* Make sure the tasks see the -1 first: */
|
|
@@ -6878,7 +6877,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
|
|
static void
|
|
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
|
|
{
|
|
- atomic_set(&t->tracing_graph_pause, 0);
|
|
atomic_set(&t->trace_overrun, 0);
|
|
t->ftrace_timestamp = 0;
|
|
/* make curr_ret_stack visible before we add the ret_stack */
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index c0dbc683322fb..3a0691c647044 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -2285,7 +2285,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
|
|
(entry = this_cpu_read(trace_buffered_event))) {
|
|
/* Try to use the per cpu buffer first */
|
|
val = this_cpu_inc_return(trace_buffered_event_cnt);
|
|
- if (val == 1) {
|
|
+ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
|
|
trace_event_setup(entry, type, flags, pc);
|
|
entry->array[0] = len;
|
|
return entry;
|
|
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
|
|
index d69c79ac97986..7b4af70d9dfd0 100644
|
|
--- a/kernel/trace/trace_events.c
|
|
+++ b/kernel/trace/trace_events.c
|
|
@@ -1114,7 +1114,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
|
|
mutex_lock(&event_mutex);
|
|
list_for_each_entry(file, &tr->events, list) {
|
|
call = file->event_call;
|
|
- if (!trace_event_name(call) || !call->class || !call->class->reg)
|
|
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
|
|
+ !trace_event_name(call) || !call->class || !call->class->reg)
|
|
continue;
|
|
|
|
if (system && strcmp(call->class->system, system->name) != 0)
|
|
diff --git a/lib/string.c b/lib/string.c
|
|
index db9abc18b2165..fba43e4ad5514 100644
|
|
--- a/lib/string.c
|
|
+++ b/lib/string.c
|
|
@@ -158,11 +158,9 @@ EXPORT_SYMBOL(strlcpy);
|
|
* @src: Where to copy the string from
|
|
* @count: Size of destination buffer
|
|
*
|
|
- * Copy the string, or as much of it as fits, into the dest buffer.
|
|
- * The routine returns the number of characters copied (not including
|
|
- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
|
|
- * The behavior is undefined if the string buffers overlap.
|
|
- * The destination buffer is always NUL terminated, unless it's zero-sized.
|
|
+ * Copy the string, or as much of it as fits, into the dest buffer. The
|
|
+ * behavior is undefined if the string buffers overlap. The destination
|
|
+ * buffer is always NUL terminated, unless it's zero-sized.
|
|
*
|
|
* Preferred to strlcpy() since the API doesn't require reading memory
|
|
* from the src string beyond the specified "count" bytes, and since
|
|
@@ -172,8 +170,10 @@ EXPORT_SYMBOL(strlcpy);
|
|
*
|
|
* Preferred to strncpy() since it always returns a valid string, and
|
|
* doesn't unnecessarily force the tail of the destination buffer to be
|
|
- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
|
|
- * with an overflow test, then just memset() the tail of the dest buffer.
|
|
+ * zeroed. If zeroing is desired please use strscpy_pad().
|
|
+ *
|
|
+ * Return: The number of characters copied (not including the trailing
|
|
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
|
|
*/
|
|
ssize_t strscpy(char *dest, const char *src, size_t count)
|
|
{
|
|
@@ -260,6 +260,39 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
|
|
}
|
|
EXPORT_SYMBOL(stpcpy);
|
|
|
|
+/**
|
|
+ * strscpy_pad() - Copy a C-string into a sized buffer
|
|
+ * @dest: Where to copy the string to
|
|
+ * @src: Where to copy the string from
|
|
+ * @count: Size of destination buffer
|
|
+ *
|
|
+ * Copy the string, or as much of it as fits, into the dest buffer. The
|
|
+ * behavior is undefined if the string buffers overlap. The destination
|
|
+ * buffer is always %NUL terminated, unless it's zero-sized.
|
|
+ *
|
|
+ * If the source string is shorter than the destination buffer, zeros
|
|
+ * the tail of the destination buffer.
|
|
+ *
|
|
+ * For full explanation of why you may want to consider using the
|
|
+ * 'strscpy' functions please see the function docstring for strscpy().
|
|
+ *
|
|
+ * Return: The number of characters copied (not including the trailing
|
|
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
|
|
+ */
|
|
+ssize_t strscpy_pad(char *dest, const char *src, size_t count)
|
|
+{
|
|
+ ssize_t written;
|
|
+
|
|
+ written = strscpy(dest, src, count);
|
|
+ if (written < 0 || written == count - 1)
|
|
+ return written;
|
|
+
|
|
+ memset(dest + written + 1, 0, count - written - 1);
|
|
+
|
|
+ return written;
|
|
+}
|
|
+EXPORT_SYMBOL(strscpy_pad);
|
|
+
|
|
#ifndef __HAVE_ARCH_STRCAT
|
|
/**
|
|
* strcat - Append one %NUL-terminated string to another
|
|
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
|
|
index 6fa31754eadd9..f5a5e9f82b221 100644
|
|
--- a/mm/backing-dev.c
|
|
+++ b/mm/backing-dev.c
|
|
@@ -19,6 +19,7 @@ struct backing_dev_info noop_backing_dev_info = {
|
|
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
|
|
|
|
static struct class *bdi_class;
|
|
+const char *bdi_unknown_name = "(unknown)";
|
|
|
|
/*
|
|
* bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
|
|
diff --git a/mm/memblock.c b/mm/memblock.c
index e81d12c544e9f..5d36b4c549292 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -174,14 +174,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * RETURNS:
  * Found address on success, 0 on failure.
  */
@@ -189,8 +181,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid, ulong flags)
 {
-	phys_addr_t kernel_end, ret;
-
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
@@ -198,39 +188,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 	/* avoid allocating the first page */
 	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
-	kernel_end = __pa_symbol(_end);
-
-	/*
-	 * try bottom-up allocation only when bottom-up mode
-	 * is set and @end is above the kernel image.
-	 */
-	if (memblock_bottom_up() && end > kernel_end) {
-		phys_addr_t bottom_up_start;
-
-		/* make sure we will allocate above the kernel */
-		bottom_up_start = max(start, kernel_end);
 
-		/* ok, try bottom-up allocation first */
-		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-						      size, align, nid, flags);
-		if (ret)
-			return ret;
-
-		/*
-		 * we always limit bottom-up allocation above the kernel,
-		 * but top-down allocation doesn't have the limit, so
-		 * retrying top-down allocation may succeed when bottom-up
-		 * allocation failed.
-		 *
-		 * bottom-up allocation is expected to be fail very rarely,
-		 * so we use WARN_ONCE() here to see the stack trace if
-		 * fail happens.
-		 */
-		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
-	}
-
-	return __memblock_find_range_top_down(start, end, size, align, nid,
-					      flags);
+	if (memblock_bottom_up())
+		return __memblock_find_range_bottom_up(start, end, size, align,
+						       nid, flags);
+	else
+		return __memblock_find_range_top_down(start, end, size, align,
+						       nid, flags);
 }
 
 /**
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 0747747fffe58..a10336cd7f974 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2906,7 +2906,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
 			break;
 		if (!aalg->pfkey_supported)
 			continue;
-		if (aalg_tmpl_set(t, aalg) && aalg->available)
+		if (aalg_tmpl_set(t, aalg))
 			sz += sizeof(struct sadb_comb);
 	}
 	return sz + sizeof(struct sadb_prop);
@@ -2924,7 +2924,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 		if (!ealg->pfkey_supported)
 			continue;
 
-		if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+		if (!(ealg_tmpl_set(t, ealg)))
 			continue;
 
 		for (k = 1; ; k++) {
@@ -2935,7 +2935,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 			if (!aalg->pfkey_supported)
 				continue;
 
-			if (aalg_tmpl_set(t, aalg) && aalg->available)
+			if (aalg_tmpl_set(t, aalg))
 				sz += sizeof(struct sadb_comb);
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8064d769c953c..ede0ab5dc400a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -939,7 +939,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 		 * Let nf_ct_resolve_clash() deal with this later.
 		 */
 		if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+				      nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
 			continue;
 
 		NF_CT_STAT_INC_ATOMIC(net, found);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index cf96d230e5a3c..cafbddf844d62 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
 /*
  * Drop entries with timestamps older then 'time'.
  */
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+			      struct recent_entry *working, bool update)
 {
 	struct recent_entry *e;
 
@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
 	 */
 	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
 
+	/*
+	 * Do not reap the entry which are going to be updated.
+	 */
+	if (e == working && update)
+		return;
+
 	/*
 	 * The last time stamp is the most recent.
 	 */
@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 		/* info->seconds must be non-zero */
 		if (info->check_set & XT_RECENT_REAP)
-			recent_entry_reap(t, time);
+			recent_entry_reap(t, time, e,
+				info->check_set & XT_RECENT_UPDATE && ret);
 	}
 
 	if (info->check_set & XT_RECENT_SET ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 1281b967dbf96..dc1eae4c206ba 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -53,6 +53,7 @@
 #include <linux/uaccess.h>
 #include <linux/hashtable.h>
 
+#include "auth_gss_internal.h"
 #include "../netns.h"
 
 static const struct rpc_authops authgss_ops;
@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
 	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
 }
 
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
-{
-	const void *q = (const void *)((const char *)p + len);
-	if (unlikely(q > end || q < p))
-		return ERR_PTR(-EFAULT);
-	memcpy(res, p, len);
-	return q;
-}
-
-static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
-{
-	const void *q;
-	unsigned int len;
-
-	p = simple_get_bytes(p, end, &len, sizeof(len));
-	if (IS_ERR(p))
-		return p;
-	q = (const void *)((const char *)p + len);
-	if (unlikely(q > end || q < p))
-		return ERR_PTR(-EFAULT);
-	dest->data = kmemdup(p, len, GFP_NOFS);
-	if (unlikely(dest->data == NULL))
-		return ERR_PTR(-ENOMEM);
-	dest->len = len;
-	return q;
-}
-
 static struct gss_cl_ctx *
 gss_cred_get_ctx(struct rpc_cred *cred)
 {
diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
new file mode 100644
index 0000000000000..f6d9631bd9d00
--- /dev/null
+++ b/net/sunrpc/auth_gss/auth_gss_internal.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
+ *
+ * Internal definitions for RPCSEC_GSS client authentication
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ */
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/sunrpc/xdr.h>
+
+static inline const void *
+simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+{
+	const void *q = (const void *)((const char *)p + len);
+	if (unlikely(q > end || q < p))
+		return ERR_PTR(-EFAULT);
+	memcpy(res, p, len);
+	return q;
+}
+
+static inline const void *
+simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+{
+	const void *q;
+	unsigned int len;
+
+	p = simple_get_bytes(p, end, &len, sizeof(len));
+	if (IS_ERR(p))
+		return p;
+	q = (const void *)((const char *)p + len);
+	if (unlikely(q > end || q < p))
+		return ERR_PTR(-EFAULT);
+	if (len) {
+		dest->data = kmemdup(p, len, GFP_NOFS);
+		if (unlikely(dest->data == NULL))
+			return ERR_PTR(-ENOMEM);
+	} else
+		dest->data = NULL;
+	dest->len = len;
+	return q;
+}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9d..14f2823ad6c20 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -46,6 +46,8 @@
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/gss_krb5_enctypes.h>
 
+#include "auth_gss_internal.h"
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY	RPCDBG_AUTH
 #endif
@@ -187,35 +189,6 @@ get_gss_krb5_enctype(int etype)
 	return NULL;
 }
 
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
-	const void *q = (const void *)((const char *)p + len);
-	if (unlikely(q > end || q < p))
-		return ERR_PTR(-EFAULT);
-	memcpy(res, p, len);
-	return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
-	const void *q;
-	unsigned int len;
-
-	p = simple_get_bytes(p, end, &len, sizeof(len));
-	if (IS_ERR(p))
-		return p;
-	q = (const void *)((const char *)p + len);
-	if (unlikely(q > end || q < p))
-		return ERR_PTR(-EFAULT);
-	res->data = kmemdup(p, len, GFP_NOFS);
-	if (unlikely(res->data == NULL))
-		return ERR_PTR(-ENOMEM);
-	res->len = len;
-	return q;
-}
-
 static inline const void *
 get_key(const void *p, const void *end,
 	struct krb5_ctx *ctx, struct crypto_skcipher **res)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 29f7491acb354..eafcc75f289ac 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -823,10 +823,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
 	 */
 
 	sk = sock->sk;
+
+	lock_sock(sk);
 	if (sock->state == SS_UNCONNECTED) {
 		err = -ENOTCONN;
 		if (sk->sk_type == SOCK_STREAM)
-			return err;
+			goto out;
 	} else {
 		sock->state = SS_DISCONNECTING;
 		err = 0;
@@ -835,10 +837,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
 	/* Receive and send shutdowns are treated alike. */
 	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
 	if (mode) {
-		lock_sock(sk);
 		sk->sk_shutdown |= mode;
 		sk->sk_state_change(sk);
-		release_sock(sk);
 
 		if (sk->sk_type == SOCK_STREAM) {
 			sock_reset_flag(sk, SOCK_DONE);
@@ -846,6 +846,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
 		}
 	}
 
+out:
+	release_sock(sk);
 	return err;
 }
 
@@ -1114,7 +1116,6 @@ static void vsock_connect_timeout(struct work_struct *work)
 {
 	struct sock *sk;
 	struct vsock_sock *vsk;
-	int cancel = 0;
 
 	vsk = container_of(work, struct vsock_sock, connect_work.work);
 	sk = sk_vsock(vsk);
@@ -1125,11 +1126,9 @@ static void vsock_connect_timeout(struct work_struct *work)
 		sk->sk_state = TCP_CLOSE;
 		sk->sk_err = ETIMEDOUT;
 		sk->sk_error_report(sk);
-		cancel = 1;
+		vsock_transport_cancel_pkt(vsk);
 	}
 	release_sock(sk);
-	if (cancel)
-		vsock_transport_cancel_pkt(vsk);
 
 	sock_put(sk);
 }
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 736b76ec8cf01..ea350a99cfc38 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -444,14 +444,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
 
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
-	struct sock *sk = sk_vsock(vsk);
-
 	if (!(mode & SEND_SHUTDOWN))
 		return 0;
 
-	lock_sock(sk);
 	hvs_shutdown_lock_held(vsk->trans, mode);
-	release_sock(sk);
 	return 0;
 }
 
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8e4c13cc61ba8..349311f6d1958 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1029,10 +1029,10 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
 	vsk = vsock_sk(sk);
 
-	space_available = virtio_transport_space_update(sk, pkt);
-
 	lock_sock(sk);
 
+	space_available = virtio_transport_space_update(sk, pkt);
+
 	/* Update CID in case it has changed after a transport reset event */
 	vsk->local_addr.svm_cid = dst.svm_cid;
 
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 3edc9c04cb468..f4b752cb17516 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -224,6 +224,8 @@ cmd_modversions_c = \
 endif
 
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ifndef CC_USING_RECORD_MCOUNT
+# compiler will not generate __mcount_loc use recordmcount or recordmcount.pl
 ifdef BUILD_C_RECORDMCOUNT
 ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
   RECORDMCOUNT_FLAGS = -w
@@ -252,6 +254,7 @@ cmd_record_mcount = \
 	   "$(CC_FLAGS_FTRACE)" ]; then \
 		$(sub_cmd_record_mcount) \
 	fi;
+endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
diff --git a/security/commoncap.c b/security/commoncap.c
index ac031fa391908..bf689d61b293c 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -378,10 +378,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 {
 	int size, ret;
 	kuid_t kroot;
+	u32 nsmagic, magic;
 	uid_t root, mappedroot;
 	char *tmpbuf = NULL;
 	struct vfs_cap_data *cap;
-	struct vfs_ns_cap_data *nscap;
+	struct vfs_ns_cap_data *nscap = NULL;
 	struct dentry *dentry;
 	struct user_namespace *fs_ns;
 
@@ -403,46 +404,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 	fs_ns = inode->i_sb->s_user_ns;
 	cap = (struct vfs_cap_data *) tmpbuf;
 	if (is_v2header((size_t) ret, cap)) {
-		/* If this is sizeof(vfs_cap_data) then we're ok with the
-		 * on-disk value, so return that. */
-		if (alloc)
-			*buffer = tmpbuf;
-		else
-			kfree(tmpbuf);
-		return ret;
-	} else if (!is_v3header((size_t) ret, cap)) {
-		kfree(tmpbuf);
-		return -EINVAL;
+		root = 0;
+	} else if (is_v3header((size_t) ret, cap)) {
+		nscap = (struct vfs_ns_cap_data *) tmpbuf;
+		root = le32_to_cpu(nscap->rootid);
+	} else {
+		size = -EINVAL;
+		goto out_free;
 	}
 
-	nscap = (struct vfs_ns_cap_data *) tmpbuf;
-	root = le32_to_cpu(nscap->rootid);
 	kroot = make_kuid(fs_ns, root);
 
 	/* If the root kuid maps to a valid uid in current ns, then return
 	 * this as a nscap. */
 	mappedroot = from_kuid(current_user_ns(), kroot);
 	if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+		size = sizeof(struct vfs_ns_cap_data);
 		if (alloc) {
-			*buffer = tmpbuf;
+			if (!nscap) {
+				/* v2 -> v3 conversion */
+				nscap = kzalloc(size, GFP_ATOMIC);
+				if (!nscap) {
+					size = -ENOMEM;
+					goto out_free;
+				}
+				nsmagic = VFS_CAP_REVISION_3;
+				magic = le32_to_cpu(cap->magic_etc);
+				if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+					nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+				memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+				nscap->magic_etc = cpu_to_le32(nsmagic);
+			} else {
+				/* use allocated v3 buffer */
+				tmpbuf = NULL;
+			}
 			nscap->rootid = cpu_to_le32(mappedroot);
-		} else
-			kfree(tmpbuf);
-		return size;
+			*buffer = nscap;
+		}
+		goto out_free;
 	}
 
 	if (!rootid_owns_currentns(kroot)) {
-		kfree(tmpbuf);
-		return -EOPNOTSUPP;
+		size = -EOVERFLOW;
+		goto out_free;
 	}
 
 	/* This comes from a parent namespace. Return as a v2 capability */
 	size = sizeof(struct vfs_cap_data);
 	if (alloc) {
-		*buffer = kmalloc(size, GFP_ATOMIC);
-		if (*buffer) {
-			struct vfs_cap_data *cap = *buffer;
-			__le32 nsmagic, magic;
+		if (nscap) {
+			/* v3 -> v2 conversion */
+			cap = kzalloc(size, GFP_ATOMIC);
+			if (!cap) {
+				size = -ENOMEM;
+				goto out_free;
+			}
 			magic = VFS_CAP_REVISION_2;
 			nsmagic = le32_to_cpu(nscap->magic_etc);
 			if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
@@ -450,9 +466,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 			memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
 			cap->magic_etc = cpu_to_le32(magic);
 		} else {
-			size = -ENOMEM;
+			/* use unconverted v2 */
+			tmpbuf = NULL;
 		}
+		*buffer = cap;
 	}
+out_free:
 	kfree(tmpbuf);
 	return size;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c1ca4d40157b1..547ae59199db2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -382,9 +382,8 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 */
 	kvm->mmu_notifier_count++;
 	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
-	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush)
+	if (need_tlb_flush || kvm->tlbs_dirty)
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);