mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE. We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom call in do_tcp_getsockopt using the on-stack data. This removes 3% overhead for locking/unlocking the socket. Without this patch: 3.38% 0.07% tcp_mmap [kernel.kallsyms] [k] __cgroup_bpf_run_filter_getsockopt | --3.30%--__cgroup_bpf_run_filter_getsockopt | --0.81%--__kmalloc With the patch applied: 0.52% 0.12% tcp_mmap [kernel.kallsyms] [k] __cgroup_bpf_run_filter_getsockopt_kern Note, exporting uapi/tcp.h requires removing netinet/tcp.h from test_progs.h because those headers have conflicting definitions. Signed-off-by: Stanislav Fomichev <sdf@google.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Martin KaFai Lau <kafai@fb.com> Link: https://lore.kernel.org/bpf/20210115163501.805133-2-sdf@google.com
69 lines
2.1 KiB
C
69 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
#define _LINUX_INDIRECT_CALL_WRAPPER_H

#ifdef CONFIG_RETPOLINE

/*
 * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
 * @f: function pointer
 * @f$NR: builtin functions names, up to $NR of them
 * @__VA_ARGS__: arguments for @f
 *
 * Avoid retpoline overhead for known builtin, checking @f vs each of them and
 * eventually invoking directly the builtin function. The functions are checked
 * in the given order. Fallback to the indirect call.
 */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})
#define INDIRECT_CALL_3(f, f3, f2, f1, ...)					\
	({									\
		likely(f == f3) ? f3(__VA_ARGS__) :				\
				  INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__);	\
	})
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...)					\
	({									\
		likely(f == f4) ? f4(__VA_ARGS__) :				\
				  INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__);	\
	})

/*
 * With retpolines the candidate callees must have extern visibility so the
 * pointer-comparison above can reference them from other translation units.
 */
#define INDIRECT_CALLABLE_DECLARE(f)	f
#define INDIRECT_CALLABLE_SCOPE

#else /* !CONFIG_RETPOLINE */

/*
 * Without retpolines an indirect call is cheap: skip the pointer comparisons
 * entirely and just make the plain indirect call. The candidates can then
 * remain static (no external declaration needed).
 */
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE	static

#endif /* CONFIG_RETPOLINE */

/*
 * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is
 * builtin, this macro simplify dealing with indirect calls with only ipv4/ipv6
 * alternatives
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) \
	INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif

/* Single-candidate variant: only the ipv4 alternative is ever builtin. */
#if IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
#endif

#endif /* _LINUX_INDIRECT_CALL_WRAPPER_H */