Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 pti updates from Ingo Molnar:
 "The main changes:

   - Make the IBPB barrier more strict and add STIBP support (Jiri
     Kosina)

   - Micro-optimize and clean up the entry code (Andy Lutomirski)

   - ... plus misc other fixes"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Propagate information about RSB filling mitigation to sysfs
  x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
  x86/speculation: Apply IBPB more strictly to avoid cross-process data leak
  x86/speculation: Add RETPOLINE_AMD support to the inline asm CALL_NOSPEC variant
  x86/CPU: Fix unused variable warning when !CONFIG_IA32_EMULATION
  x86/pti/64: Remove the SYSCALL64 entry trampoline
  x86/entry/64: Use the TSS sp2 slot for SYSCALL/SYSRET scratch space
  x86/entry/64: Document idtentry
commit d82924c3b8
19 changed files with 222 additions and 176 deletions
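Before the diff itself, a note on the moving parts: the IBPB and STIBP work above is driven by two architectural MSRs. A minimal sketch of the relevant bits, reproduced from memory of arch/x86/include/asm/msr-index.h and the Intel/AMD speculation-control documentation (verify exact values against the tree):

    /* IA32_SPEC_CTRL: per-thread speculation controls (read-modify-write) */
    #define MSR_IA32_SPEC_CTRL	0x00000048
    #define SPEC_CTRL_IBRS	(1 << 0)	/* Indirect Branch Restricted Speculation */
    #define SPEC_CTRL_STIBP	(1 << 1)	/* Single Thread Indirect Branch Predictors */
    #define SPEC_CTRL_SSBD	(1 << 2)	/* Speculative Store Bypass Disable */

    /* IA32_PRED_CMD: write-only command MSR; writing the IBPB bit flushes
     * indirect branch prediction state (the barrier this series applies
     * more strictly). */
    #define MSR_IA32_PRED_CMD	0x00000049
    #define PRED_CMD_IBPB	(1 << 0)

STIBP, enabled below when SMT is active, stops one hyperthread's indirect branch predictions from being steered by its sibling thread.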
arch/x86/kernel/asm-offsets.c
@@ -96,13 +96,12 @@ void common(void) {
 	OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
 
 	/* Layout info for cpu_entry_area */
-	OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
-	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
 	OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
 	DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 	DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
 
-	/* Offset for sp0 and sp1 into the tss_struct */
+	/* Offset for fields in tss_struct */
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
+	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
 }
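A note on what this file is: asm-offsets.c is never linked into the kernel; it is compiled once so the build can scrape structure offsets (such as the new TSS_sp2) into the generated asm-offsets.h for use from entry assembly. Roughly how the macros expand, per include/linux/kbuild.h (quoted from memory):

    #define DEFINE(sym, val) \
    	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    #define OFFSET(sym, str, mem) \
    	DEFINE(sym, offsetof(struct str, mem))

The build post-processes the resulting assembly into plain #define constants, which is what lets entry_64.S refer to x86_tss.sp2 symbolically as TSS_sp2.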
arch/x86/kernel/cpu/bugs.c
@@ -35,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 
-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
-u64 __ro_after_init x86_spec_ctrl_base;
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -326,6 +324,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	return cmd;
 }
 
+static bool stibp_needed(void)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_NONE)
+		return false;
+
+	if (!boot_cpu_has(X86_FEATURE_STIBP))
+		return false;
+
+	return true;
+}
+
+static void update_stibp_msr(void *info)
+{
+	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+void arch_smt_update(void)
+{
+	u64 mask;
+
+	if (!stibp_needed())
+		return;
+
+	mutex_lock(&spec_ctrl_mutex);
+	mask = x86_spec_ctrl_base;
+	if (cpu_smt_control == CPU_SMT_ENABLED)
+		mask |= SPEC_CTRL_STIBP;
+	else
+		mask &= ~SPEC_CTRL_STIBP;
+
+	if (mask != x86_spec_ctrl_base) {
+		pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+				cpu_smt_control == CPU_SMT_ENABLED ?
+				"Enabling" : "Disabling");
+		x86_spec_ctrl_base = mask;
+		on_each_cpu(update_stibp_msr, NULL, 1);
+	}
+	mutex_unlock(&spec_ctrl_mutex);
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
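arch_smt_update() is not called from this file; it overrides a weak hook that the same series adds to the generic SMT control path in kernel/cpu.c. A sketch of the generic side, as best recalled from the corresponding core patch:

    /* kernel/cpu.c (sketch): default is a no-op; x86 overrides it above */
    void __weak arch_smt_update(void) { }

The effect is that toggling SMT at runtime (for example via /sys/devices/system/cpu/smt/control) re-evaluates STIBP and pushes the new SPEC_CTRL value to every CPU through on_each_cpu().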
@@ -426,6 +464,9 @@ specv2_set_mode:
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
+
+	/* Enable STIBP if appropriate */
+	arch_smt_update();
 }
 
 #undef pr_fmt
@@ -816,6 +857,8 @@ static ssize_t l1tf_show_state(char *buf)
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
+	int ret;
+
 	if (!boot_cpu_has_bug(bug))
 		return sprintf(buf, "Not affected\n");
 
@@ -833,10 +876,13 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 
 	case X86_BUG_SPECTRE_V2:
-		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+		ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
 			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
+		return ret;
 
 	case X86_BUG_SPEC_STORE_BYPASS:
 		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
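The effect of this hunk is directly visible from userspace. A small self-contained reader; the output shown in the comment is illustrative, since the exact string depends on CPU features and the selected mitigation:

    #include <stdio.h>

    int main(void)
    {
    	char line[256];
    	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

    	if (!f)
    		return 1;
    	if (fgets(line, sizeof(line), f))
    		/* e.g. "Mitigation: Full generic retpoline, IBPB, IBRS_FW, STIBP, RSB filling" */
    		fputs(line, stdout);
    	fclose(f);
    	return 0;
    }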
arch/x86/kernel/cpu/common.c
@@ -1534,19 +1534,8 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
-	extern char _entry_trampoline[];
-	extern char entry_SYSCALL_64_trampoline[];
-
-	int cpu = smp_processor_id();
-	unsigned long SYSCALL64_entry_trampoline =
-		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
-		(entry_SYSCALL_64_trampoline - _entry_trampoline);
-
 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-	if (static_cpu_has(X86_FEATURE_PTI))
-		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
-	else
-		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
 	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1557,7 +1546,8 @@ void syscall_init(void)
 	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
 	 */
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
 	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
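One idiom in the replacement line deserves a remark: cpu_entry_stack(smp_processor_id()) + 1 is not off-by-one. Adding 1 to a struct pointer advances by the full struct size, landing one past its end, which is exactly where a downward-growing, initially empty stack starts. A standalone demonstration with a stand-in type (the kernel's struct entry_stack lives in asm/processor.h; the size here is illustrative):

    #include <stdio.h>

    struct entry_stack {		/* stand-in for the kernel's type */
    	unsigned long words[64];
    };

    int main(void)
    {
    	static struct entry_stack stack;
    	void *top = &stack + 1;	/* one sizeof(struct entry_stack) past the base */

    	printf("base=%p top=%p size=%zu\n",
    	       (void *)&stack, top, sizeof(struct entry_stack));
    	return 0;
    }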
arch/x86/kernel/kprobes/core.c
@@ -1028,18 +1028,10 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
-	bool is_in_entry_trampoline_section = false;
-
-#ifdef CONFIG_X86_64
-	is_in_entry_trampoline_section =
-		(addr >= (unsigned long)__entry_trampoline_start &&
-		 addr < (unsigned long)__entry_trampoline_end);
-#endif
 	return (addr >= (unsigned long)__kprobes_text_start &&
 		addr < (unsigned long)__kprobes_text_end) ||
 	       (addr >= (unsigned long)__entry_text_start &&
-		addr < (unsigned long)__entry_text_end) ||
-	       is_in_entry_trampoline_section;
+		addr < (unsigned long)__entry_text_end);
 }
 
 int __init arch_init_kprobes(void)
arch/x86/kernel/process_64.c
@@ -60,8 +60,6 @@
 #include <asm/unistd_32_ia32.h>
 #endif
 
-__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
-
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
arch/x86/kernel/traps.c
@@ -383,6 +383,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	 * we won't enable interupts or schedule before we invoke
 	 * general_protection, so nothing will clobber the stack
 	 * frame we just set up.
+	 *
+	 * We will enter general_protection with kernel GSBASE,
+	 * which is what the stub expects, given that the faulting
+	 * RIP will be the IRET instruction.
 	 */
 	regs->ip = (unsigned long)general_protection;
 	regs->sp = (unsigned long)&gpregs->orig_ax;
arch/x86/kernel/vmlinux.lds.S
@@ -136,16 +136,6 @@ SECTIONS
 		*(.fixup)
 		*(.gnu.warning)
 
-#ifdef CONFIG_X86_64
-		. = ALIGN(PAGE_SIZE);
-		__entry_trampoline_start = .;
-		_entry_trampoline = .;
-		*(.entry_trampoline)
-		. = ALIGN(PAGE_SIZE);
-		__entry_trampoline_end = .;
-		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
-#endif
-
 #ifdef CONFIG_RETPOLINE
 		__indirect_thunk_start = .;
 		*(.text.__x86.indirect_thunk)