The JIT compiler emits ia32 (32-bit x86) instructions. Currently, it
supports eBPF only; classic BPF is supported through the conversion done
by the BPF core.

Almost all instructions from the eBPF ISA are supported, except the
following:

  BPF_ALU64 | BPF_DIV | BPF_K
  BPF_ALU64 | BPF_DIV | BPF_X
  BPF_ALU64 | BPF_MOD | BPF_K
  BPF_ALU64 | BPF_MOD | BPF_X
  BPF_STX | BPF_XADD | BPF_W
  BPF_STX | BPF_XADD | BPF_DW

It doesn't support BPF_JMP | BPF_CALL with BPF_PSEUDO_CALL at the moment.

IA32 has only a few general purpose registers: EAX|EDX|ECX|EBX|ESI|EDI.
I use EAX|EDX|ECX|EBX as temporary registers to simulate instructions in
the eBPF ISA, allocate ESI|EDI to BPF_REG_AX for constant blinding, and
simulate all other eBPF registers, R0-R10, through scratch space on the
stack.

The reasons behind the hardware register allocation policy are:

1: MUL needs EAX:EDX and shift operations need ECX, so they aren't
   suitable for general eBPF 64-bit register simulation.
2: We need at least 4 registers to simulate most eBPF ISA operations on
   register operands instead of on register & memory operands.
3: We need to put BPF_REG_AX in hardware registers, or constant blinding
   will degrade JIT performance heavily.

(A minimal C sketch of the two-half simulation of a 64-bit operation
follows the changelog below.)

Tested on a PC (Intel(R) Core(TM) i5-5200U CPU). Testing results on
i5-5200U:

1) test_bpf: Summary: 349 PASSED, 0 FAILED, [319/341 JIT'ed]
2) test_progs: Summary: 83 PASSED, 0 FAILED
3) test_lpm: OK
4) test_lru_map: OK
5) test_verifier: Summary: 828 PASSED, 0 FAILED

The above tests were all run under the following two conditions
separately:

1: bpf_jit_enable=1 and bpf_jit_harden=0
2: bpf_jit_enable=1 and bpf_jit_harden=2

Below are some numbers for this JIT implementation.

Note: I ran test_progs from kselftest 100 times continuously for every
condition; the numbers are in the format total/times=avg. The numbers
that test_bpf reports show almost the same relation.

a: jit_enable=0 and jit_harden=0           b: jit_enable=1 and jit_harden=0
test_pkt_access:PASS:ipv4:15622/100=156    test_pkt_access:PASS:ipv4:10674/100=106
test_pkt_access:PASS:ipv6:9130/100=91      test_pkt_access:PASS:ipv6:4855/100=48
test_xdp:PASS:ipv4:240198/100=2401         test_xdp:PASS:ipv4:138912/100=1389
test_xdp:PASS:ipv6:137326/100=1373         test_xdp:PASS:ipv6:68542/100=685
test_l4lb:PASS:ipv4:61100/100=611          test_l4lb:PASS:ipv4:37302/100=373
test_l4lb:PASS:ipv6:101000/100=1010        test_l4lb:PASS:ipv6:55030/100=550

c: jit_enable=1 and jit_harden=2
test_pkt_access:PASS:ipv4:10558/100=105
test_pkt_access:PASS:ipv6:5092/100=50
test_xdp:PASS:ipv4:131902/100=1319
test_xdp:PASS:ipv6:77932/100=779
test_l4lb:PASS:ipv4:38924/100=389
test_l4lb:PASS:ipv6:57520/100=575

The numbers show we get a 30%~50% improvement.

See Documentation/networking/filter.txt for more information.

Changelog:

 Changes v5-v6:
  1: Add do {} while (0) to RETPOLINE_RAX_BPF_JIT for consistency.
  2: Clean up non-standard comments, reported by Daniel Borkmann.
  3: Fix a memory leak issue, reported by Daniel Borkmann.

 Changes v4-v5:
  1: Delete is_on_stack; BPF_REG_AX is the only eBPF register kept in a
     real hardware register, so just check against it.
  2: Apply commit 1612a981b7 ("bpf, x64: fix JIT emission for dead code"),
     suggested by Daniel Borkmann.

 Changes v3-v4:
  1: Fix changelog in commit. After installing llvm-6.0, test_progs no
     longer reports errors. I submitted another patch, "bpf: fix
     misaligned access for BPF_PROG_TYPE_PERF_EVENT program type on
     x86_32 platform", to fix a separate problem; with that patch applied,
     test_verifier no longer reports errors either.
  2: Fix clearing r0[1] twice unnecessarily in the *BPF_IND|BPF_ABS*
     simulation.

 Changes v2-v3:
  1: Move BPF_REG_AX to real hardware registers for performance reasons.
  3: Use bpf_load_pointer instead of bpf_jit32.S, suggested by Daniel
     Borkmann.
  4: Delete partial code in 1c2a088a66, suggested by Daniel Borkmann.
  5: Some bug fixes and comment improvements.

 Changes v1-v2:
  1: Fix bug in emit_ia32_neg64.
  2: Fix bug in emit_ia32_arsh_r64.
  3: Delete filename in top level comment, suggested by Thomas Gleixner.
  4: Delete unnecessary boilerplate text, suggested by Thomas Gleixner.
  5: Rewrite some words in changelog.
  6: CodingStyle improvement and a little more comments.

Signed-off-by: Wang YanQing <udknight@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
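The sketch below is illustrative only and is not part of the patch: it
shows, in plain C, how a single 64-bit eBPF operation (a BPF_ALU64 add)
decomposes into a pair of 32-bit operations on the low and high halves,
which is why register-register simulation needs several 32-bit
temporaries. The names ebpf_reg64 and add64 are hypothetical.

	#include <stdint.h>

	/* A 64-bit eBPF register modelled as two 32-bit halves, the way
	 * the ia32 JIT keeps them in scratch slots on the stack.
	 */
	struct ebpf_reg64 {
		uint32_t lo;
		uint32_t hi;
	};

	/* dst += src, split the way an add/adc pair does it on ia32. */
	static void add64(struct ebpf_reg64 *dst, const struct ebpf_reg64 *src)
	{
		uint32_t old_lo = dst->lo;

		dst->lo += src->lo;                       /* add: low halves */
		dst->hi += src->hi + (dst->lo < old_lo);  /* adc: carry into high halves */
	}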
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version — two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;
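
/*
 * A note on reading the macro above: reg is loaded with nr/2 because each
 * pass of the loop performs two calls, i.e. stuffs two entries into the
 * RSB (and pushes two return addresses onto the stack).  After the loop,
 * the stack pointer is moved back up by nr words so the stack itself is
 * left unchanged.
 */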

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP	\reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm
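
/*
 * Usage sketch (illustrative, not part of the original header): assembly
 * code replaces a plain indirect branch with these macros, for example
 *
 *	JMP_NOSPEC	%rdi		instead of	jmp	*%rdi
 *	CALL_NOSPEC	%rax		instead of	call	*%rax
 *
 * so retpoline builds get the safe thunk via the ALTERNATIVE_2 above while
 * CONFIG_RETPOLINE=n builds fall back to the plain instruction.
 */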

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier which is only in newer GCC,
 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	addl   $4, %%esp;\n"				\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
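
/*
 * Usage sketch (illustrative only; 'fn' is a hypothetical function
 * pointer, not something defined here): C callers embed CALL_NOSPEC in
 * inline asm and pass the branch target through THUNK_TARGET(), e.g.
 *
 *	asm volatile(CALL_NOSPEC
 *		     : ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * Real call sites also list their outputs and whatever registers the
 * called function is allowed to clobber.
 */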

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}

#define alternative_msr_write(_msr, _val, _feature)		\
	asm volatile(ALTERNATIVE("",				\
				 "movl %[msr], %%ecx\n\t"	\
				 "movl %[val], %%eax\n\t"	\
				 "movl $0, %%edx\n\t"		\
				 "wrmsr",			\
				 _feature)			\
		     : : [msr] "i" (_msr), [val] "i" (_val)	\
		     : "eax", "ecx", "edx", "memory")

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
			      X86_FEATURE_USE_IBPB);
}

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
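
/*
 * Usage sketch (illustrative; 'efi_do_something' is a hypothetical
 * firmware call, not defined here): callers bracket the firmware call
 * with the pair above, so IBRS is set for its duration and preemption
 * stays disabled in between:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_do_something(args);
 *	firmware_restrict_branch_speculation_end();
 */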

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64
# define RETPOLINE_RAX_BPF_JIT_SIZE	17
# define RETPOLINE_RAX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);	 /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);	 /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);		 /* retq */			\
} while (0)
#else
# define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);	 /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);	 /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);		 /* ret */			\
} while (0)
#endif
#else /* !CONFIG_RETPOLINE */

#ifdef CONFIG_X86_64
# define RETPOLINE_RAX_BPF_JIT_SIZE	2
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);	/* jmp *%rax */
#else
# define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)	/* jmp *%edx */
#endif
#endif
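
/*
 * Usage sketch (illustrative, not part of this header): the eBPF JIT
 * invokes the macro above wherever it would otherwise emit a plain
 * indirect jump through the target register, e.g.
 *
 *	RETPOLINE_RAX_BPF_JIT();	(x86_64 JIT, target in %rax)
 *	RETPOLINE_EDX_BPF_JIT();	(x86_32 JIT, target in %edx)
 *
 * With CONFIG_RETPOLINE the full call/pause/lfence/ret sequence described
 * in the comment above is emitted; otherwise it degenerates to jmp *%reg.
 */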

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */