Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-07-13

The following pull-request contains BPF updates for your *net-next* tree.

We've added 36 non-merge commits during the last 7 day(s) which contain
a total of 62 files changed, 2242 insertions(+), 468 deletions(-).

The main changes are:

1) Avoid trace_printk warning banner by switching bpf_trace_printk to use
   its own tracing event, from Alan.

2) Better libbpf support on older kernels, from Andrii.

3) Additional AF_XDP stats, from Ciara.

4) Build-time resolution of BTF IDs, from Jiri.

5) BPF_CGROUP_INET_SOCK_RELEASE hook, from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in commit 07dd1b7e68
62 changed files with 2235 additions and 461 deletions
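Change 1) is visible to BPF program authors through the bpf_printk() convenience macro from bpf_helpers.h: its output is now emitted via a dedicated bpf_trace_printk tracing event instead of going through trace_printk() and its one-time warning banner. A minimal sketch, not taken from this series (the section and function names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
        /* with this series, the output is carried by the new
         * bpf_trace_printk trace event rather than trace_printk() */
        bpf_printk("execve observed\n");
        return 0;
}

char LICENSE[] SEC("license") = "GPL";

The diff excerpted below is the libbpf side of the series; all hunks are in tools/lib/bpf/libbpf.c.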
tools/lib/bpf/libbpf.c
@@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
 	return false;
 }
 
-static void bpf_object__sanitize_btf(struct bpf_object *obj)
+static bool btf_needs_sanitization(struct bpf_object *obj)
+{
+	bool has_func_global = obj->caps.btf_func_global;
+	bool has_datasec = obj->caps.btf_datasec;
+	bool has_func = obj->caps.btf_func;
+
+	return !has_func || !has_datasec || !has_func_global;
+}
+
+static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 {
 	bool has_func_global = obj->caps.btf_func_global;
 	bool has_datasec = obj->caps.btf_datasec;
 	bool has_func = obj->caps.btf_func;
-	struct btf *btf = obj->btf;
 	struct btf_type *t;
 	int i, j, vlen;
 
-	if (!obj->btf || (has_func && has_datasec && has_func_global))
-		return;
-
 	for (i = 1; i <= btf__get_nr_types(btf); i++) {
 		t = (struct btf_type *)btf__type_by_id(btf, i);
 
@@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
 	}
 }
 
-static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
-{
-	if (!obj->btf_ext)
-		return;
-
-	if (!obj->caps.btf_func) {
-		btf_ext__free(obj->btf_ext);
-		obj->btf_ext = NULL;
-	}
-}
-
 static bool libbpf_needs_btf(const struct bpf_object *obj)
 {
 	return obj->efile.btf_maps_shndx >= 0 ||
@@ -2473,19 +2467,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
 		return 0;
 
 	err = btf__finalize_data(obj, obj->btf);
-	if (!err)
-		return 0;
-
-	pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
-	btf__free(obj->btf);
-	obj->btf = NULL;
-	btf_ext__free(obj->btf_ext);
-	obj->btf_ext = NULL;
-
-	if (libbpf_needs_btf(obj)) {
-		pr_warn("BTF is required, but is missing or corrupted.\n");
-		return -ENOENT;
+	if (err) {
+		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
+		return err;
 	}
 
 	return 0;
 }
@@ -2538,30 +2524,45 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
 
 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 {
+	struct btf *kern_btf = obj->btf;
+	bool btf_mandatory, sanitize;
 	int err = 0;
 
 	if (!obj->btf)
 		return 0;
 
-	bpf_object__sanitize_btf(obj);
-	bpf_object__sanitize_btf_ext(obj);
+	sanitize = btf_needs_sanitization(obj);
+	if (sanitize) {
+		const void *raw_data;
+		__u32 sz;
 
-	err = btf__load(obj->btf);
-	if (err) {
-		pr_warn("Error loading %s into kernel: %d.\n",
-			BTF_ELF_SEC, err);
-		btf__free(obj->btf);
-		obj->btf = NULL;
-		/* btf_ext can't exist without btf, so free it as well */
-		if (obj->btf_ext) {
-			btf_ext__free(obj->btf_ext);
-			obj->btf_ext = NULL;
-		}
+		/* clone BTF to sanitize a copy and leave the original intact */
+		raw_data = btf__get_raw_data(obj->btf, &sz);
+		kern_btf = btf__new(raw_data, sz);
+		if (IS_ERR(kern_btf))
+			return PTR_ERR(kern_btf);
 
-		if (kernel_needs_btf(obj))
-			return err;
+		bpf_object__sanitize_btf(obj, kern_btf);
 	}
-	return 0;
+
+	err = btf__load(kern_btf);
+	if (sanitize) {
+		if (!err) {
+			/* move fd to libbpf's BTF */
+			btf__set_fd(obj->btf, btf__fd(kern_btf));
+			btf__set_fd(kern_btf, -1);
+		}
+		btf__free(kern_btf);
+	}
+	if (err) {
+		btf_mandatory = kernel_needs_btf(obj);
+		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
+			btf_mandatory ? "BTF is mandatory, can't proceed."
+				      : "BTF is optional, ignoring.");
+		if (!btf_mandatory)
+			err = 0;
+	}
+	return err;
 }
 
 static int bpf_object__elf_collect(struct bpf_object *obj)
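The rewrite above clones the object's BTF and sanitizes only the copy, so libbpf keeps the original, fully-featured BTF in memory while the kernel is handed a version with unsupported features removed; on success the loaded copy's fd is transferred to the original via btf__set_fd(). The same round trip works through public libbpf APIs; a minimal sketch (btf_clone() is a hypothetical helper, not part of libbpf):

#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* Clone a BTF object by serializing it and re-parsing the raw bytes,
 * mirroring the btf__get_raw_data()/btf__new() round trip above. */
static struct btf *btf_clone(const struct btf *src)
{
        const void *raw;
        __u32 sz;
        struct btf *copy;

        raw = btf__get_raw_data(src, &sz);
        if (!raw)
                return NULL;

        copy = btf__new(raw, sz);
        if (libbpf_get_error(copy))
                return NULL; /* btf__new() encodes errors as ERR_PTR values */

        return copy;
}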
@@ -3785,7 +3786,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
 	create_attr.btf_fd = 0;
 	create_attr.btf_key_type_id = 0;
 	create_attr.btf_value_type_id = 0;
-	if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
+	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
 		create_attr.btf_fd = btf__fd(obj->btf);
 		create_attr.btf_key_type_id = map->btf_key_type_id;
 		create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -5375,18 +5376,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 		load_attr.kern_version = kern_version;
 		load_attr.prog_ifindex = prog->prog_ifindex;
 	}
-	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
-	if (prog->obj->btf_ext)
-		btf_fd = bpf_object__btf_fd(prog->obj);
-	else
-		btf_fd = -1;
-	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
-	load_attr.func_info = prog->func_info;
-	load_attr.func_info_rec_size = prog->func_info_rec_size;
-	load_attr.func_info_cnt = prog->func_info_cnt;
-	load_attr.line_info = prog->line_info;
-	load_attr.line_info_rec_size = prog->line_info_rec_size;
-	load_attr.line_info_cnt = prog->line_info_cnt;
+	/* specify func_info/line_info only if kernel supports them */
+	btf_fd = bpf_object__btf_fd(prog->obj);
+	if (btf_fd >= 0 && prog->obj->caps.btf_func) {
+		load_attr.prog_btf_fd = btf_fd;
+		load_attr.func_info = prog->func_info;
+		load_attr.func_info_rec_size = prog->func_info_rec_size;
+		load_attr.func_info_cnt = prog->func_info_cnt;
+		load_attr.line_info = prog->line_info;
+		load_attr.line_info_rec_size = prog->line_info_rec_size;
+		load_attr.line_info_cnt = prog->line_info_cnt;
+	}
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
 
@@ -6923,6 +6923,10 @@ static const struct bpf_sec_def section_defs[] = {
 	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
 						BPF_CGROUP_INET_EGRESS),
 	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
+	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
+						BPF_CGROUP_INET_SOCK_CREATE),
+	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
+						BPF_CGROUP_INET_SOCK_RELEASE),
 	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
 						BPF_CGROUP_INET_SOCK_CREATE),
 	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
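With the two BPF_EAPROG_SEC() entries added above, libbpf derives both the program type and the expected attach type (BPF_CGROUP_INET_SOCK_CREATE, or the new BPF_CGROUP_INET_SOCK_RELEASE) from the ELF section name alone. A minimal sketch of a program targeting the new hook (the function name is illustrative; BPF_PROG_TYPE_CGROUP_SOCK programs take a struct bpf_sock context):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* matched by the new BPF_EAPROG_SEC("cgroup/sock_release", ...) entry */
SEC("cgroup/sock_release")
int log_sock_release(struct bpf_sock *ctx)
{
        return 1; /* cgroup/sock programs return 1 to allow the operation */
}

char LICENSE[] SEC("license") = "GPL";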
@@ -8588,7 +8592,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 					      struct perf_buffer_params *p)
 {
 	const char *online_cpus_file = "/sys/devices/system/cpu/online";
-	struct bpf_map_info map = {};
+	struct bpf_map_info map;
 	char msg[STRERR_BUFSIZE];
 	struct perf_buffer *pb;
 	bool *online = NULL;
@@ -8601,19 +8605,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 		return ERR_PTR(-EINVAL);
 	}
 
+	/* best-effort sanity checks */
+	memset(&map, 0, sizeof(map));
 	map_info_len = sizeof(map);
 	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
 	if (err) {
 		err = -errno;
-		pr_warn("failed to get map info for map FD %d: %s\n",
-			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
-		return ERR_PTR(err);
-	}
-
-	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
-		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
-			map.name);
-		return ERR_PTR(-EINVAL);
+		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
+		 * -EBADFD, -EFAULT, or -E2BIG on real error
+		 */
+		if (err != -EINVAL) {
+			pr_warn("failed to get map info for map FD %d: %s\n",
+				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+			return ERR_PTR(err);
+		}
+		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
+			 map_fd);
+	} else {
+		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+				map.name);
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	pb = calloc(1, sizeof(*pb));
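The reworked error path treats -EINVAL from bpf_obj_get_info_by_fd() as "kernel predates BPF_OBJ_GET_INFO_BY_FD" and continues with the zeroed map info (hence the memset() above) instead of failing perf buffer creation; only other errno values are fatal. That is also why the final hunk below guards the max_entries clamp: on old kernels the field stays 0. A sketch of the same best-effort probe as a standalone helper (try_get_map_info() is hypothetical, not a libbpf API):

#include <errno.h>
#include <string.h>
#include <bpf/bpf.h>

/* Returns 0 with *info filled in, 1 if the kernel lacks
 * BPF_OBJ_GET_INFO_BY_FD (reported as -EINVAL), or -errno otherwise. */
static int try_get_map_info(int map_fd, struct bpf_map_info *info)
{
        __u32 len = sizeof(*info);

        memset(info, 0, sizeof(*info));
        if (!bpf_obj_get_info_by_fd(map_fd, info, &len))
                return 0;

        return errno == EINVAL ? 1 : -errno;
}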
@@ -8645,7 +8658,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 			err = pb->cpu_cnt;
 			goto error;
 		}
-		if (map.max_entries < pb->cpu_cnt)
+		if (map.max_entries && map.max_entries < pb->cpu_cnt)
 			pb->cpu_cnt = map.max_entries;
 	}
 