diff --git a/Makefile b/Makefile
index ee92a12e3a4b..9b795164122e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 147
+SUBLEVEL = 148
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 167f77b3bd43..6963dff815dc 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -1250,7 +1250,7 @@
	/* non-prefetchable memory */
	0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
	num-lanes = <1>;
-	interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+	interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
		<&clks IMX6SX_CLK_PCIE_AXI>,
		<&clks IMX6SX_CLK_LVDS1_OUT>,
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 729f89163bc3..210b3d675261 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -177,7 +177,7 @@ config PREFETCH

config MLONGCALLS
	bool "Enable the -mlong-calls compiler option for big kernels"
-	def_bool y if (!MODULES)
+	default y
	depends on PA8X00
	help
	  If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()		do { synchronize_caches(); } while (0)
+#define rmb()		mb()
+#define wmb()		mb()
+#define dma_rmb()	mb()
+#define dma_wmb()	mb()
+#else
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define dma_rmb()	barrier()
+#define dma_wmb()	barrier()
+#endif
+
+#define __smp_mb()	mb()
+#define __smp_rmb()	mb()
+#define __smp_wmb()	mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 5dc831955de5..13cb2461fef5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -481,6 +481,8 @@
	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	sync
	or,COND(=)	%r0,\spc,%r0
	stw		\spc,0(\tmp)
#endif
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 16073f472118..b3434a7fd3c9 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi		1,\tmp
+	sync
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9f22195b90ed..f68eedc72484 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -631,6 +631,7 @@ cas_action:
	sub,<>	%r28, %r25, %r0
2:	stw,ma	%r24, 0(%r26)
	/* Free lock */
+	sync
	stw,ma	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
@@ -645,6 +646,7 @@ cas_action:
3:
	/* Error occurred on load or store */
	/* Free lock */
+	sync
	stw	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
@@ -846,6 +848,7 @@ cas2_action:

cas2_end:
	/* Free lock */
+	sync
	stw,ma	%r20, 0(%sr2,%r20)
	/* Enable interrupts */
	ssm	PSW_SM_I, %r0
@@ -856,6 +859,7 @@ cas2_end:
22:
	/* Error occurred on load or store */
	/* Free lock */
+	sync
	stw	%r20, 0(%sr2,%r20)
	ssm	PSW_SM_I, %r0
	ldo	1(%r0),%r28
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f4b175db70f4..dd2269dcbc47 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -193,12 +193,12 @@
#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */

+#define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */

-#define X86_FEATURE_RETPOLINE	( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
-
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */

@@ -214,7 +214,7 @@
#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
-
+#define X86_FEATURE_L1TF_PTEINV	( 7*32+29) /* "" L1TF workaround PTE inversion */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW	( 8*32+ 0) /* Intel TPR Shadow */
@@ -310,6 +310,7 @@
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_SPEC_CTRL	(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP	(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D	(18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */

@@ -331,5 +332,6 @@
#define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */

#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 0056bc945cd1..cb7f04981c6b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -8,6 +8,8 @@
 * Interrupt control:
 */

+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3a52ee0e726d..bfceb5cc6347 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -27,8 +27,13 @@
#define N_EXCEPTION_STACKS 1

#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT	44
+/*
+ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
+ * but we need the full mask to make sure inverted PROT_NONE
+ * entries have all the host bits set in a guest.
+ * The real limit is still 44 bits.
+ */
+#define __PHYSICAL_MASK_SHIFT	52
#define __VIRTUAL_MASK_SHIFT	32

#else  /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11959de..89c50332a71e 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

+/* No inverted PFNs on 2 level page tables */
+
+static inline u64 protnone_mask(u64 val)
+{
+	return 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+	return val;
+}
+
+static inline bool __pte_needs_invert(u64 val)
+{
+	return false;
+}
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cdaa58c9b39e..5c686382d84b 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#endif

/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS		5
+
+#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
+ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
+ * __swp_entry_to_pte() through the following helper macro based on 64bit
+ * __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
+		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
+					     __pteval_swp_offset(pte)))
+
+#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 000000000000..44b1203ece12
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGTABLE_INVERT_H
+#define _ASM_PGTABLE_INVERT_H 1
+
+#ifndef __ASSEMBLY__
+
+static inline bool __pte_needs_invert(u64 val)
+{
+	return !(val & _PAGE_PRESENT);
+}
+
+/* Get a mask to xor with the page table entry to get the correct pfn. */
+static inline u64 protnone_mask(u64 val)
+{
+	return __pte_needs_invert(val) ? ~0ull : 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+	/*
+	 * When a PTE transitions from NONE to !NONE or vice-versa
+	 * invert the PFN part to stop speculation.
+	 * pte_pfn undoes this when needed.
+	 */
+	if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+		val = (val & ~mask) | (~val & mask);
+	return val;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
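Aside: the inversion in pgtable-invert.h is a plain XOR over the PFN bits, so
it is self-inverse and pte_pfn() can recover the original PFN by XOR-ing with
the same mask. A minimal standalone sketch of that round-trip, using
illustrative constants for _PAGE_PRESENT and the PFN mask (not the kernel's
real headers):

#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT 0x001ULL
#define PTE_PFN_MASK  0x000ffffffffff000ULL /* illustrative: PFN in bits 12..51 */

/* same logic as __pte_needs_invert()/protnone_mask() in the hunk above */
static uint64_t protnone_mask(uint64_t val)
{
        return (val & _PAGE_PRESENT) ? 0 : ~0ULL;
}

/* same logic as flip_protnone_guard(): invert PFN bits on present<->none */
static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
        if (!(oldval & _PAGE_PRESENT) != !(val & _PAGE_PRESENT))
                val = (val & ~mask) | (~val & mask);
        return val;
}

int main(void)
{
        uint64_t pte  = (0x12345ULL << 12) | _PAGE_PRESENT;
        /* PROT_NONE transition: present bit cleared, PFN bits inverted */
        uint64_t none = flip_protnone_guard(pte, pte & ~_PAGE_PRESENT, PTE_PFN_MASK);
        /* a pte_pfn()-style readback undoes the inversion */
        uint64_t pfn  = ((none ^ protnone_mask(none)) & PTE_PFN_MASK) >> 12;
        assert(pfn == 0x12345);
        return 0;
}
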
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 84c62d950023..4de6c282c02a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte)
	return pte_flags(pte) & _PAGE_SPECIAL;
}

+/* Entries that were set to PROT_NONE are inverted */
+
+static inline u64 protnone_mask(u64 val);
+
static inline unsigned long pte_pfn(pte_t pte)
{
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+	phys_addr_t pfn = pte_val(pte);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
-	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+	phys_addr_t pfn = pmd_val(pmd);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
-	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+	phys_addr_t pfn = pud_val(pud);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
@@ -305,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
	return pmd_set_flags(pmd, _PAGE_RW);
}

-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@@ -359,19 +364,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
-	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
-		     massage_pgprot(pgprot));
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PTE_PFN_MASK;
+	return __pte(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
-	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
-		     massage_pgprot(pgprot));
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PHYSICAL_PMD_PAGE_MASK;
+	return __pmd(pfn | massage_pgprot(pgprot));
+}
+
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+	phys_addr_t pfn = page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PHYSICAL_PUD_PAGE_MASK;
+	return __pud(pfn | massage_pgprot(pgprot));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pfn_pmd(pmd_pfn(pmd),
+		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
-	pteval_t val = pte_val(pte);
+	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
@@ -379,17 +423,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
-
+	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
-	pmdval_t val = pmd_val(pmd);
+	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
-
+	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

@@ -926,6 +970,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
}
#endif

+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return boot_cpu_has_bug(X86_BUG_L1TF);
+}
+
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c810226e741a..221a32ed1372 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

-/* Encode and de-code a swap entry */
-#define SWP_TYPE_BITS 5
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+/*
+ * Encode and de-code a swap entry
+ *
+ * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
+ * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
+ * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes.  We need to start storing swap entries above
+ * there.  We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ *
+ * SD (1) in swp entry is used to store soft dirty bit, which helps us
+ * remember soft dirty over page migration
+ *
+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
+ * but also L and G.
+ *
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define SWP_TYPE_BITS		5
+
+#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

-#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
-					 & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
-#define __swp_entry(type, offset)	((swp_entry_t) { \
-					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
-					 | ((offset) << SWP_OFFSET_SHIFT) })
+/* Extract the high bits for type */
+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+/* Shift up (to get rid of type), then down to get value */
+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+/*
+ * Shift the offset up "too far" by TYPE bits, then down again
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+	| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

@@ -201,6 +235,8 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

+#include <asm/pgtable-invert.h>
+
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */
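Aside: the swap-entry encoding above round-trips because the offset is first
shifted all the way up (discarding the bits reserved for the type) and then
back down, with a binary NOT so the high "physical" bits of a swap PTE read
as set. A standalone sketch of the same arithmetic, using the constants from
this hunk (the lowercase helper names are hypothetical, not the kernel
macros):

#include <assert.h>
#include <stdint.h>

#define SWP_TYPE_BITS        5
#define _PAGE_BIT_PROTNONE   8
#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)               /* 9 */
#define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS) /* 14 */

/* __swp_entry(): type in the top 5 bits, ~offset in bits 9..58 */
static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
               | (type << (64 - SWP_TYPE_BITS));
}

static uint64_t swp_type(uint64_t val)   { return val >> (64 - SWP_TYPE_BITS); }
static uint64_t swp_offset(uint64_t val) { return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT; }

int main(void)
{
        uint64_t e = swp_entry(3, 0xabcdef);
        assert(swp_type(e) == 3);
        assert(swp_offset(e) == 0xabcdef);
        assert((e & 0x1ff) == 0); /* P, G and the other low PTE bits stay clear */
        return 0;
}
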
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8dba273da25a..7572ce32055e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -70,15 +70,15 @@
/*
 * Tracking soft dirty bit when a page goes to a swap is tricky.
 * We need a bit which can be stored in pte _and_ not conflict
- * with swap entry format. On x86 bits 6 and 7 are *not* involved
- * into swap entry computation, but bit 6 is used for nonlinear
- * file mapping, so we borrow bit 7 for soft dirty tracking.
+ * with swap entry format. On x86 bits 1-4 are *not* involved
+ * into swap entry computation, but bit 7 is used for thp migration,
+ * so we borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as swap dirty page
- * mark if and only if the PTE has present bit clear!
+ * mark if and only if the PTE/PMD has present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 8e415cf65457..a3a53955f01c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -172,6 +172,11 @@ extern const struct seq_operations cpuinfo_op;

extern void cpu_detect(struct cpuinfo_x86 *c);

+static inline unsigned long l1tf_pfn_limit(void)
+{
+	return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+}
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
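Aside: a worked example of the limit above — on a CPU with
boot_cpu_data.x86_phys_bits = 46, l1tf_pfn_limit() = BIT(46 - 1 - 12) - 1 =
2^33 - 1, i.e. the last page frame strictly below MAX_PA/2 (2^45 bytes, or
32 TiB with 4 KiB pages). The mitigation relies on no RAM existing at or
above that boundary, which is what the e820 check added to bugs.c below
verifies.
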
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 12a8867071f3..34e4aaaf03d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -26,9 +26,11 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
+#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -80,6 +82,8 @@ void __init check_bugs(void)
	 */
	ssb_select_mitigation();

+	l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
@@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
	return cmd;
}

-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_data.x86 == 6) {
-		switch (boot_cpu_data.x86_model) {
-		case INTEL_FAM6_SKYLAKE_MOBILE:
-		case INTEL_FAM6_SKYLAKE_DESKTOP:
-		case INTEL_FAM6_SKYLAKE_X:
-		case INTEL_FAM6_KABYLAKE_MOBILE:
-		case INTEL_FAM6_KABYLAKE_DESKTOP:
-			return true;
-		}
-	}
-	return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -386,22 +373,15 @@ retpoline_auto:
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
-	 * If neither SMEP nor PTI are available, there is a risk of
-	 * hitting userspace addresses in the RSB after a context switch
-	 * from a shallow call stack to a deeper one. To prevent this fill
-	 * the entire RSB, even when using IBRS.
+	 * If spectre v2 protection has been enabled, unconditionally fill
+	 * RSB during a context switch; this protects against two independent
+	 * issues:
	 *
-	 * Skylake era CPUs have a separate issue with *underflow* of the
-	 * RSB, when they will predict 'ret' targets from the generic BTB.
-	 * The proper mitigation for this is IBRS. If IBRS is not supported
-	 * or deactivated in favour of retpolines the RSB fill on context
-	 * switch is required.
+	 *	- RSB underflow (and switch to BTB) on Skylake+
+	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
-	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
-	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
-		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
-	}
+	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void)
		x86_amd_ssb_disable();
}

+#undef pr_fmt
+#define pr_fmt(fmt)	"L1TF: " fmt
+static void __init l1tf_select_mitigation(void)
+{
+	u64 half_pa;
+
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return;
+
+#if CONFIG_PGTABLE_LEVELS == 2
+	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+	return;
+#endif
+
+	/*
+	 * This is extremely unlikely to happen because almost all
+	 * systems have far more MAX_PA/2 than RAM can be fit into
+	 * DIMM slots.
+	 */
+	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+		return;
+	}
+
+	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

+	case X86_BUG_L1TF:
+		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+			return sprintf(buf, "Mitigation: Page Table Inversion\n");
+		break;
+
	default:
		break;
	}
@@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3d21b28f9826..4d3fa79c0f09 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -880,6 +880,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
	{}
};

+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+	/* in addition to cpu_no_speculation */
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MOOREFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_DENVERTON	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GEMINI_LAKE	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
+	{}
+};
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;
@@ -905,6 +920,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+	if (x86_match_cpu(cpu_no_l1tf))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 1f5c47a49e35..c6f466d6cc57 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -393,7 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src)
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
@@ -609,8 +608,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
-		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
-		       p->addr);
+		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f534a0e3af53..632195b41688 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf,
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

-	if (tgt_clobbers & ~site_clobbers)
-		return len;	/* target would clobber too much for this site */
-	if (len < 5)
+	if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
		return len;	/* call too long for patch site */
+	}

	b->opcode = 0xe8; /* call */
	b->delta = delta;
@@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

-	if (len < 5)
+	if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
		return len;	/* call too long for patch site */
+	}

	b->opcode = 0xe9;	/* jmp */
	b->delta = delta;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbaae4cf9e8e..31c4bc0d3372 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

+	/*
+	 * Make sure page 0 is always reserved because on systems with
+	 * L1TF its contents can be leaked to user processes.
+	 */
+	memblock_reserve(0, PAGE_SIZE);
+
	early_reserve_initrd();

	/*
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 151fd33e9043..4954a6cef50a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */
+#include <linux/swapfile.h>
+#include <linux/swapops.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
@@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}
+
+#ifdef CONFIG_SWAP
+unsigned long max_swapfile_size(void)
+{
+	unsigned long pages;
+
+	pages = generic_max_swapfile_size();
+
+	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+		unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+		/*
+		 * We encode swap offsets also with 3 bits below those for pfn
+		 * which makes the usable limit higher.
+		 */
+#if CONFIG_PGTABLE_LEVELS > 2
+		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+#endif
+		pages = min_t(unsigned long, l1tf_limit, pages);
+	}
+	return pages;
+}
+#endif
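Aside: following the comment above — swap offsets start at PTE bit 9
(SWP_OFFSET_FIRST_BIT) while PFNs start at bit 12, so the offset limit gains
PAGE_SHIFT - SWP_OFFSET_FIRST_BIT = 3 bits. With x86_phys_bits = 46 that is
l1tf_limit = 2^33 pages, shifted up to 2^36 swap pages, i.e. a 256 TiB cap on
a swap file rather than 32 TiB. (Worked numbers assume 4 KiB pages and are an
illustration, not taken from the patch.)
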
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 76604c8a2a48..7bf14e74fc8f 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
+	pmd_t new_pmd;
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
-		*old = v & _PAGE_PRESENT;
-		v &= ~_PAGE_PRESENT;
-	} else	/* presume this has been called with clear==true previously */
-		v |= *old;
-	set_pmd(pmd, __pmd(v));
+		*old = v;
+		new_pmd = pmd_mknotpresent(*pmd);
+	} else {
+		/* Presume this has been called with clear==true previously */
+		new_pmd = __pmd(*old);
+	}
+	set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
-		*old = v & _PAGE_PRESENT;
-		v &= ~_PAGE_PRESENT;
-	} else	/* presume this has been called with clear==true previously */
-		v |= *old;
-	set_pte_atomic(pte, __pte(v));
+		*old = v;
+		/* Nothing should care about address */
+		pte_clear(&init_mm, 0, pte);
+	} else {
+		/* Presume this has been called with clear==true previously */
+		set_pte_atomic(pte, __pte(*old));
+	}
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 307f60ecfc6d..9a055ea279eb 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
		return "[mpx]";
	return NULL;
}
+
+/*
+ * Only allow root to set high MMIO mappings to PROT_NONE.
+ * This prevents an unpriv. user to set them to PROT_NONE and invert
+ * them, then pointing to valid memory for L1TF speculation.
+ *
+ * Note: for locked down kernels may want to disable the root override.
+ */
+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return true;
+	if (!__pte_needs_invert(pgprot_val(prot)))
+		return true;
+	/* If it's real memory always allow */
+	if (pfn_valid(pfn))
+		return true;
+	if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+		return false;
+	return true;
+}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 79377e2a7bcd..27610c2d1821 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa,

		pmd = pmd_offset(pud, start);

-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
-				   massage_pgprot(pmd_pgprot)));
+		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+					canon_pgprot(pmd_pgprot))));

		start	  += PMD_SIZE;
		cpa->pfn  += PMD_SIZE;
@@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
-				   massage_pgprot(pud_pgprot)));
+		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+				   canon_pgprot(pud_pgprot))));

		start	  += PUD_SIZE;
		cpa->pfn  += PUD_SIZE;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f9e0d09f7c66..8a0f77fb5181 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
+	.prv_offset = 0x800,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+	.prv_offset = 0x800,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 143edea1076f..41090ef5facb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -524,16 +524,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
	return sprintf(buf, "Not affected\n");
}

+ssize_t __weak cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	&dev_attr_l1tf.attr,
	NULL
};

diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..4719aa781bf2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -25,7 +25,7 @@ struct file_priv {
	struct tpm_chip *chip;

	/* Data passed to and from the tpm via the read/write calls */
-	atomic_t data_pending;
+	size_t data_pending;
	struct mutex buffer_mutex;

	struct timer_list user_read_timer;      /* user needs to claim result */
@@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
	struct file_priv *priv = container_of(work, struct file_priv, work);

	mutex_lock(&priv->buffer_mutex);
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
	mutex_unlock(&priv->buffer_mutex);
}
@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
	}

	priv->chip = chip;
-	atomic_set(&priv->data_pending, 0);
	mutex_init(&priv->buffer_mutex);
	setup_timer(&priv->user_read_timer, user_reader_timeout,
			(unsigned long)priv);
@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
-	ssize_t ret_size;
+	ssize_t ret_size = 0;
	int rc;

	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
-	ret_size = atomic_read(&priv->data_pending);
-	if (ret_size > 0) {	/* relay data */
-		ssize_t orig_ret_size = ret_size;
-		if (size < ret_size)
-			ret_size = size;
+	mutex_lock(&priv->buffer_mutex);

-		mutex_lock(&priv->buffer_mutex);
+	if (priv->data_pending) {
+		ret_size = min_t(ssize_t, size, priv->data_pending);
		rc = copy_to_user(buf, priv->data_buffer, ret_size);
-		memset(priv->data_buffer, 0, orig_ret_size);
+		memset(priv->data_buffer, 0, priv->data_pending);
		if (rc)
			ret_size = -EFAULT;

-		mutex_unlock(&priv->buffer_mutex);
+		priv->data_pending = 0;
	}

-	atomic_set(&priv->data_pending, 0);
-
+	mutex_unlock(&priv->buffer_mutex);
	return ret_size;
}

@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
	size_t in_size = size;
	ssize_t out_size;

-	/* cannot perform a write until the read has cleared
-	   either via tpm_read or a user_read_timer timeout.
-	   This also prevents splitted buffered writes from blocking here.
-	*/
-	if (atomic_read(&priv->data_pending) != 0)
-		return -EBUSY;
-
	if (in_size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

+	/* Cannot perform a write until the read has cleared either via
+	 * tpm_read or a user_read_timer timeout. This also prevents split
+	 * buffered writes from blocking here.
+	 */
+	if (priv->data_pending != 0) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EBUSY;
+	}
+
	if (copy_from_user
	    (priv->data_buffer, (void __user *) buf, in_size)) {
		mutex_unlock(&priv->buffer_mutex);
@@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
		return out_size;
	}

-	atomic_set(&priv->data_pending, out_size);
+	priv->data_pending = out_size;
	mutex_unlock(&priv->buffer_mutex);

	/* Set a timeout by which the reader must come claim the result */
@@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file)
	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
	file->private_data = NULL;
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
	clear_bit(0, &priv->chip->is_open);
	kfree(priv);
	return 0;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6790ebb366dd..98fd9a594841 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);
-	/*
-	 * We ask for writable memory if any of the following
-	 * access flags are set.  "Local write" and "remote write"
-	 * obviously require write access.  "Remote atomic" can do
-	 * things like fetch and add, which will modify memory, and
-	 * "MW bind" can change permissions by binding a window.
-	 */
-	umem->writable  = !!(access &
-		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
-		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+	umem->writable  = ib_access_writable(access);

	if (access & IB_ACCESS_ON_DEMAND) {
		put_pid(umem->pid);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ce87e9cc7eff..bf52e35dd506 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -130,6 +130,40 @@ out:
	return err;
}

+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+					u64 length, u64 virt_addr,
+					int access_flags)
+{
+	/*
+	 * Force registering the memory as writable if the underlying pages
+	 * are writable.  This is so rereg can change the access permissions
+	 * from readable to writable without having to run through ib_umem_get
+	 * again
+	 */
+	if (!ib_access_writable(access_flags)) {
+		struct vm_area_struct *vma;
+
+		down_read(&current->mm->mmap_sem);
+		/*
+		 * FIXME: Ideally this would iterate over all the vmas that
+		 * cover the memory, but for now it requires a single vma to
+		 * entirely cover the MR to support RO mappings.
+		 */
+		vma = find_vma(current->mm, start);
+		if (vma && vma->vm_end >= start + length &&
+		    vma->vm_start <= start) {
+			if (vma->vm_flags & VM_WRITE)
+				access_flags |= IB_ACCESS_LOCAL_WRITE;
+		} else {
+			access_flags |= IB_ACCESS_LOCAL_WRITE;
+		}
+
+		up_read(&current->mm->mmap_sem);
+	}
+
+	return ib_umem_get(context, start, length, access_flags, 0);
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
@@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	if (!mr)
		return ERR_PTR(-ENOMEM);

-	/* Force registering the memory as writable. */
-	/* Used for memory re-registeration. HCA protects the access */
-	mr->umem = ib_umem_get(pd->uobject->context, start, length,
-			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+	mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+				    virt_addr, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
@@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
	}

	if (flags & IB_MR_REREG_ACCESS) {
+		if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+			return -EPERM;
+
		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));

@@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
-		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
-					mr_access_flags |
-					IB_ACCESS_LOCAL_WRITE,
-					0);
+		mmr->umem =
+			mlx4_get_umem_mr(mr->uobject->context, start, length,
+					 virt_addr, mr_access_flags);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 748b63b86cbc..40242ead096f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;

-	if (count > 32)
+	if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
		goto err;

	if (copy_from_user(tmp_str, buffer, count))
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bec9f099573b..68d0a5c9d437 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -879,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

@@ -888,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index de53c9694b68..5dc288fecace 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_cd *cd;
+	struct scsi_device *sdev;
	int ret = -ENXIO;

+	cd = scsi_cd_get(bdev->bd_disk);
+	if (!cd)
+		goto out;
+
+	sdev = cd->device;
+	scsi_autopm_get_device(sdev);
	check_disk_change(bdev);

	mutex_lock(&sr_mutex);
-	cd = scsi_cd_get(bdev->bd_disk);
-	if (cd) {
-		ret = cdrom_open(&cd->cdi, bdev, mode);
-		if (ret)
-			scsi_cd_put(cd);
-	}
+	ret = cdrom_open(&cd->cdi, bdev, mode);
	mutex_unlock(&sr_mutex);

+	scsi_autopm_put_device(sdev);
+	if (ret)
+		scsi_cd_put(cd);
+
+out:
	return ret;
}

@@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	if (ret)
		goto out;

+	scsi_autopm_get_device(sdev);
+
	/*
	 * Send SCSI addressing ioctls directly to mid level, send other
	 * ioctls to cdrom/block level.
@@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
		ret = scsi_ioctl(sdev, cmd, argp);
-		goto out;
+		goto put;
	}

	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
	if (ret != -ENOSYS)
-		goto out;
+		goto put;

	ret = scsi_ioctl(sdev, cmd, argp);

+put:
+	scsi_autopm_put_device(sdev);
+
out:
	mutex_unlock(&sr_mutex);
	return ret;
diff --git a/fs/dcache.c b/fs/dcache.c
index 250c1222e30c..807efaab838e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1954,10 +1954,12 @@ struct dentry *d_make_root(struct inode *root_inode)
		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
-		if (res)
+		if (res) {
+			res->d_flags |= DCACHE_RCUACCESS;
			d_instantiate(res, root_inode);
-		else
+		} else {
			iput(root_inode);
+		}
	}
	return res;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 041117fd8fd7..0963213e9cd3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1308,7 +1308,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+			       ext4_itable_unused_count(sb, gdp)) <
+			      EXT4_FIRST_INO(sb)))) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3e4d8ac1974e..8d18f6142da5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2875,14 +2875,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
		if (!gdp)
			continue;

-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-			continue;
-		if (group != 0)
+		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
-		ext4_error(sb, "Inode table for bg 0 marked as "
-			   "needing zeroing");
-		if (sb->s_flags & MS_RDONLY)
-			return ngroups;
	}

	return group;
diff --git a/fs/namespace.c b/fs/namespace.c
index a879560ea144..b56b50e3da11 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -603,12 +603,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
+	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
+	lock_mount_hash();
+	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+		mnt_add_count(mnt, -1);
+		unlock_mount_hash();
+		return 1;
+	}
+	unlock_mount_hash();
+	/* caller will mntput() */
	return -1;
}

@@ -1124,12 +1133,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
-	mnt_add_count(mnt, -1);
-	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+	if (likely(READ_ONCE(mnt->mnt_ns))) {
+		/*
+		 * Since we don't do lock_mount_hash() here,
+		 * ->mnt_ns can change under us.  However, if it's
+		 * non-NULL, then there's a reference that won't
+		 * be dropped until after an RCU delay done after
+		 * turning ->mnt_ns NULL.  So if we observe it
+		 * non-NULL under rcu_read_lock(), the reference
+		 * we are dropping is not the final one.
+		 */
+		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
+	/*
+	 * make sure that if __legitimize_mnt() has not seen us grab
+	 * mount_lock, we'll see their refcount increment here.
+	 */
+	smp_mb();
+	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 25b793325b09..dabecb661264 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -799,6 +799,18 @@ static inline int pmd_free_pte_page(pmd_t *pmd)
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 2f9d12022100..063c73ed6d78 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a100946607a5..1f4366567e7d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2083,6 +2083,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a91e8c..e4594de79bc4 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);

#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ff307b548ed3..646891f3bc1e 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -55,11 +55,7 @@ extern long do_no_restart_syscall(struct restart_block *parm);

#ifdef __KERNEL__

-#ifdef CONFIG_DEBUG_STACK_USAGE
-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-#else
-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK)
-#endif
+#define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 120da1d7f57e..10fefb0dc640 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags)
 	return 0;
 }
 
+static inline bool ib_access_writable(int access_flags)
+{
+	/*
+	 * We have writable memory backing the MR if any of the following
+	 * access flags are set. "Local write" and "remote write" obviously
+	 * require write access. "Remote atomic" can do things like fetch and
+	 * add, which will modify memory, and "MW bind" can change permissions
+	 * by binding a window.
+	 */
+	return access_flags &
+		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
 /**
  * ib_check_mr_status: lightweight check of MR status.
  * This routine may provide status checks on a selected
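ib_access_writable() gives drivers a single predicate for "can this MR modify memory". A hedged sketch of the intended call pattern when pinning user pages (pin_mr_pages() and get_user_pages_for_mr() are made-up names; the real consumer of this helper is the mlx4 MR code):

static int pin_mr_pages(struct ib_pd *pd, u64 start, u64 length,
			int access_flags)
{
	/*
	 * Request writable mappings of the user pages only when a
	 * write-capable access flag is present; read-only MRs can share
	 * read-only mappings.
	 */
	int writable = ib_access_writable(access_flags);

	return get_user_pages_for_mr(pd, start, length, writable);
}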
diff --git a/mm/memory.c b/mm/memory.c
index 177cb7d111a9..d5bb1465d30c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1604,9 +1604,30 @@ out:
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
+{
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings. In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
 {
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1624,19 +1645,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	if (track_pfn_insert(vma, &pgprot, pfn))
 		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	pgprot_t pgprot = vma->vm_page_prot;
+
 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return -EINVAL;
+
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
 
 	/*
 	 * If we don't have pte special, then we have to use the pfn_valid()
@@ -1649,9 +1680,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		struct page *page;
 
 		page = pfn_to_page(pfn);
-		return insert_page(vma, addr, page, vma->vm_page_prot);
+		return insert_page(vma, addr, page, pgprot);
 	}
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	return insert_pfn(vma, addr, pfn, pgprot);
 }
 EXPORT_SYMBOL(vm_insert_mixed);
 
@@ -1666,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
@@ -1673,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	arch_enter_lazy_mmu_mode();
 	do {
 		BUG_ON(!pte_none(*pte));
+		if (!pfn_modify_allowed(pfn, prot)) {
+			err = -EACCES;
+			break;
+		}
 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1687,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pmd = pmd_alloc(mm, pud, addr);
@@ -1695,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
-		if (remap_pte_range(mm, pmd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pte_range(mm, pmd, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -1708,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pud = pud_alloc(mm, pgd, addr);
@@ -1715,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pmd_range(mm, pud, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
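vm_insert_pfn_prot() lets a driver choose a pgprot per page instead of per VMA. A sketch of a 4.4-style fault handler that uses it to map device memory write-combined (every mydev_* name is invented for illustration):

struct mydev {
	unsigned long io_pfn_base;	/* first PFN of the device aperture */
};

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long pfn = dev->io_pfn_base + vmf->pgoff;

	/* Per-page override: write-combined even if the VMA default isn't. */
	if (vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
			       pfn, pgprot_writecombine(vma->vm_page_prot)))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}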
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c0b4b2a49462..a277f3412a5d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 	return pages;
 }
 
+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
+			       unsigned long next, struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
+				   unsigned long addr, unsigned long next,
+				   struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_test(unsigned long addr, unsigned long next,
+			  struct mm_walk *walk)
+{
+	return 0;
+}
+
+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, unsigned long newflags)
+{
+	pgprot_t new_pgprot = vm_get_page_prot(newflags);
+	struct mm_walk prot_none_walk = {
+		.pte_entry = prot_none_pte_entry,
+		.hugetlb_entry = prot_none_hugetlb_entry,
+		.test_walk = prot_none_test,
+		.mm = current->mm,
+		.private = &new_pgprot,
+	};
+
+	return walk_page_range(start, end, &prot_none_walk);
+}
+
 int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
@@ -272,6 +308,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		return 0;
 	}
 
+	/*
+	 * Do PROT_NONE PFN permission checks here when we can still
+	 * bail out without undoing a lot of state. This is a rather
+	 * uncommon case, so doesn't need to be very optimized.
+	 */
+	if (arch_has_pfn_modify_check() &&
+	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+		error = prot_none_walk(vma, start, end, newflags);
+		if (error)
+			return error;
+	}
+
 	/*
 	 * If we make a private mapping writable we increase our commit;
 	 * but (without finer accounting) cannot reduce our commit if we
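The prot_none_* callbacks above follow the standard mm_walk pattern: .private carries the candidate pgprot down to the per-pte callbacks, and the always-zero .test_walk matters because walk_page_range()'s default test skips VM_PFNMAP vmas, exactly the ones this check targets. A self-contained sketch of the same pattern (illustrative only; the caller must hold mmap_sem):

static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_ptes(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= current->mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}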
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 674bf177ce44..8e25ff2b693a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2206,6 +2206,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
 	return 0;
 }
 
+
+/*
+ * Find out how many pages are allowed for a single swap device. There
+ * are two limiting factors:
+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte, as defined by the different
+ * architectures.
+ *
+ * In order to find the largest possible bit mask, a swap entry with
+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap offset is
+ * extracted.
+ *
+ * This will mask all the bits from the initial ~0UL mask that can't
+ * be encoded in either the swp_entry_t or the architecture definition
+ * of a swap pte.
+ */
+unsigned long generic_max_swapfile_size(void)
+{
+	return swp_offset(pte_to_swp_entry(
+			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+}
+
+/* Can be overridden by an architecture for additional checks. */
+__weak unsigned long max_swapfile_size(void)
+{
+	return generic_max_swapfile_size();
+}
+
 static unsigned long read_swap_header(struct swap_info_struct *p,
 					union swap_header *swap_header,
 					struct inode *inode)
@@ -2241,22 +2270,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 	p->cluster_next = 1;
 	p->cluster_nr = 0;
 
-	/*
-	 * Find out how many pages are allowed for a single swap
-	 * device. There are two limiting factors: 1) the number
-	 * of bits for the swap offset in the swp_entry_t type, and
-	 * 2) the number of bits in the swap pte as defined by the
-	 * different architectures. In order to find the
-	 * largest possible bit mask, a swap entry with swap type 0
-	 * and swap offset ~0UL is created, encoded to a swap pte,
-	 * decoded to a swp_entry_t again, and finally the swap
-	 * offset is extracted. This will mask all the bits from
-	 * the initial ~0UL mask that can't be encoded in either
-	 * the swp_entry_t or the architecture definition of a
-	 * swap pte.
-	 */
-	maxpages = swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+	maxpages = max_swapfile_size();
 	last_page = swap_header->info.last_page;
 	if (!last_page) {
 		pr_warn("Empty swap-file\n");
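The round-trip in generic_max_swapfile_size() asks the architecture, at run time, how many offset bits survive the swap-pte encoding. A worked sketch with an assumed figure: on an architecture whose swap pte keeps 50 offset bits, the all-ones offset comes back truncated to 50 bits, so the probe reports 2^50 pages. The __weak max_swapfile_size() wrapper then lets an architecture shrink the result further, which the x86 L1TF mitigation uses to keep swap offsets out of the unsafe PFN range.

static unsigned long probe_swap_offset_bits(void)
{
	swp_entry_t all_ones = swp_entry(0, ~0UL);	/* type 0, offset all ones */
	pte_t pte = swp_entry_to_pte(all_ones);		/* unencodable bits drop out */
	swp_entry_t back = pte_to_swp_entry(pte);	/* decode what survived */

	return swp_offset(back) + 1;			/* largest offset + 1, e.g. 1UL << 50 */
}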
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 93581bba8643..09d6c4a6b53d 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -354,6 +354,7 @@ config INET_ESP
 	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
+	select CRYPTO_ECHAINIV
 	---help---
 	  Support for IPsec ESP.
 
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 851d5c9e3ecc..0f50248bad17 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,6 +69,7 @@ config INET6_ESP
 	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
+	select CRYPTO_ECHAINIV
 	---help---
 	  Support for IPsec ESP.
 