From f3a95075549e0e5c36db922caf86847db7a35403 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:41 +0200 Subject: [PATCH 01/19] bpf: Allow trampoline re-attach for tracing and lsm programs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we don't allow re-attaching of trampolines. Once it's detached, it can't be re-attach even when the program is still loaded. Adding the possibility to re-attach the loaded tracing and lsm programs. Fixing missing unlock with proper cleanup goto jump reported by Julia. Reported-by: kernel test robot Reported-by: Julia Lawall Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Toke Høiland-Jørgensen Acked-by: Andrii Nakryiko Acked-by: KP Singh Link: https://lore.kernel.org/bpf/20210414195147.1624932-2-jolsa@kernel.org --- kernel/bpf/syscall.c | 23 +++++++++++++++++------ kernel/bpf/trampoline.c | 4 ++-- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fd495190115e..941ca06d9dfa 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2648,14 +2648,25 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, * target_btf_id using the link_create API. * * - if tgt_prog == NULL when this function was called using the old - * raw_tracepoint_open API, and we need a target from prog->aux - * - * The combination of no saved target in prog->aux, and no target - * specified on load is illegal, and we reject that here. + * raw_tracepoint_open API, and we need a target from prog->aux + * + * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program + * was detached and is going for re-attachment. */ if (!prog->aux->dst_trampoline && !tgt_prog) { - err = -ENOENT; - goto out_unlock; + /* + * Allow re-attach for TRACING and LSM programs. If it's + * currently linked, bpf_trampoline_link_prog will fail. + * EXT programs need to specify tgt_prog_fd, so they + * re-attach in separate code path. + */ + if (prog->type != BPF_PROG_TYPE_TRACING && + prog->type != BPF_PROG_TYPE_LSM) { + err = -EINVAL; + goto out_unlock; + } + btf_id = prog->aux->attach_btf_id; + key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); } if (!prog->aux->dst_trampoline || diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 4aa8b52adf25..2d44b5aa0057 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -444,7 +444,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) tr->progs_cnt[kind]++; err = bpf_trampoline_update(tr); if (err) { - hlist_del(&prog->aux->tramp_hlist); + hlist_del_init(&prog->aux->tramp_hlist); tr->progs_cnt[kind]--; } out: @@ -467,7 +467,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) tr->extension_prog = NULL; goto out; } - hlist_del(&prog->aux->tramp_hlist); + hlist_del_init(&prog->aux->tramp_hlist); tr->progs_cnt[kind]--; err = bpf_trampoline_update(tr); out: From 56dda5a48f4f610ac9a0487c6fb64d31950e4a3e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:43 +0200 Subject: [PATCH 02/19] selftests/bpf: Add re-attach test to fentry_test Adding the test to re-attach (detach/attach again) tracing fentry programs, plus check that already linked program can't be attached again. 
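For readers following the series, a minimal userspace sketch (not part of the patch; skeleton and program names are taken from the fentry_test selftest, error handling trimmed) of the detach/re-attach flow that the first patch enables and this test exercises:

  struct fentry_test *skel;
  struct bpf_link *link;

  skel = fentry_test__open_and_load();            /* program loaded once */

  link = bpf_program__attach(skel->progs.test1);  /* first attach */
  bpf_link__destroy(link);                        /* detach, prog stays loaded */

  /* previously this second attach failed with -ENOENT because the
   * dst_trampoline was already consumed; now the trampoline key is
   * recomputed from prog->aux->attach_btf_id and the attach succeeds
   */
  link = bpf_program__attach(skel->progs.test1);

  bpf_link__destroy(link);
  fentry_test__destroy(skel);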
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20210414195147.1624932-4-jolsa@kernel.org --- .../selftests/bpf/prog_tests/fentry_test.c | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 04ebbf1cb390..7cb111b11995 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -3,35 +3,57 @@ #include #include "fentry_test.skel.h" -void test_fentry_test(void) +static int fentry_test(struct fentry_test *fentry_skel) { - struct fentry_test *fentry_skel = NULL; int err, prog_fd, i; __u32 duration = 0, retval; + struct bpf_link *link; __u64 *result; - fentry_skel = fentry_test__open_and_load(); - if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) - goto cleanup; - err = fentry_test__attach(fentry_skel); - if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) - goto cleanup; + if (!ASSERT_OK(err, "fentry_attach")) + return err; + + /* Check that already linked program can't be attached again. */ + link = bpf_program__attach(fentry_skel->progs.test1); + if (!ASSERT_ERR_PTR(link, "fentry_attach_link")) + return -1; prog_fd = bpf_program__fd(fentry_skel->progs.test1); err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - CHECK(err || retval, "test_run", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(retval, 0, "test_run"); result = (__u64 *)fentry_skel->bss; - for (i = 0; i < 6; i++) { - if (CHECK(result[i] != 1, "result", - "fentry_test%d failed err %lld\n", i + 1, result[i])) - goto cleanup; + for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) { + if (!ASSERT_EQ(result[i], 1, "fentry_result")) + return -1; } + fentry_test__detach(fentry_skel); + + /* zero results for re-attach test */ + memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss)); + return 0; +} + +void test_fentry_test(void) +{ + struct fentry_test *fentry_skel = NULL; + int err; + + fentry_skel = fentry_test__open_and_load(); + if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) + goto cleanup; + + err = fentry_test(fentry_skel); + if (!ASSERT_OK(err, "fentry_first_attach")) + goto cleanup; + + err = fentry_test(fentry_skel); + ASSERT_OK(err, "fentry_second_attach"); + cleanup: fentry_test__destroy(fentry_skel); } From 8caadc43f2019caebbf314f7a6ae2faed791e783 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:44 +0200 Subject: [PATCH 03/19] selftests/bpf: Add re-attach test to fexit_test Adding the test to re-attach (detach/attach again) tracing fexit programs, plus check that already linked program can't be attached again. Also switching to ASSERT* macros. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20210414195147.1624932-5-jolsa@kernel.org --- .../selftests/bpf/prog_tests/fexit_test.c | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index 78d7a2765c27..6792e41f7f69 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -3,35 +3,57 @@ #include #include "fexit_test.skel.h" -void test_fexit_test(void) +static int fexit_test(struct fexit_test *fexit_skel) { - struct fexit_test *fexit_skel = NULL; int err, prog_fd, i; __u32 duration = 0, retval; + struct bpf_link *link; __u64 *result; - fexit_skel = fexit_test__open_and_load(); - if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) - goto cleanup; - err = fexit_test__attach(fexit_skel); - if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) - goto cleanup; + if (!ASSERT_OK(err, "fexit_attach")) + return err; + + /* Check that already linked program can't be attached again. */ + link = bpf_program__attach(fexit_skel->progs.test1); + if (!ASSERT_ERR_PTR(link, "fexit_attach_link")) + return -1; prog_fd = bpf_program__fd(fexit_skel->progs.test1); err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - CHECK(err || retval, "test_run", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(retval, 0, "test_run"); result = (__u64 *)fexit_skel->bss; - for (i = 0; i < 6; i++) { - if (CHECK(result[i] != 1, "result", - "fexit_test%d failed err %lld\n", i + 1, result[i])) - goto cleanup; + for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { + if (!ASSERT_EQ(result[i], 1, "fexit_result")) + return -1; } + fexit_test__detach(fexit_skel); + + /* zero results for re-attach test */ + memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss)); + return 0; +} + +void test_fexit_test(void) +{ + struct fexit_test *fexit_skel = NULL; + int err; + + fexit_skel = fexit_test__open_and_load(); + if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) + goto cleanup; + + err = fexit_test(fexit_skel); + if (!ASSERT_OK(err, "fexit_first_attach")) + goto cleanup; + + err = fexit_test(fexit_skel); + ASSERT_OK(err, "fexit_second_attach"); + cleanup: fexit_test__destroy(fexit_skel); } From cede72ad367a105852e814ef91717aac4383b853 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:45 +0200 Subject: [PATCH 04/19] selftests/bpf: Add re-attach test to lsm test Adding the test to re-attach (detach/attach again) lsm programs, plus check that already linked program can't be attached again. 
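For context, a rough BPF-side sketch of the kind of LSM hook program being detached and re-attached here (modeled on the lsm.c selftest program; the body is simplified and illustrative, not verbatim):

  #include "vmlinux.h"
  #include <errno.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  int monitored_pid;
  int mprotect_count;

  SEC("lsm/file_mprotect")
  int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
               unsigned long reqprot, unsigned long prot, int ret)
  {
          if (ret != 0)
                  return ret;
          if ((bpf_get_current_pid_tgid() >> 32) != monitored_pid)
                  return 0;

          mprotect_count++;
          return -EPERM;          /* deny mprotect for the monitored task */
  }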
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20210414195147.1624932-6-jolsa@kernel.org --- .../selftests/bpf/prog_tests/test_lsm.c | 48 +++++++++++++++---- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c index 2755e4f81499..d492e76e01cf 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c +++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c @@ -18,6 +18,8 @@ char *CMD_ARGS[] = {"true", NULL}; #define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \ (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1)) +static int duration = 0; + int stack_mprotect(void) { void *buf; @@ -51,23 +53,25 @@ int exec_cmd(int *monitored_pid) return -EINVAL; } -void test_test_lsm(void) +static int test_lsm(struct lsm *skel) { - struct lsm *skel = NULL; - int err, duration = 0; + struct bpf_link *link; int buf = 1234; - - skel = lsm__open_and_load(); - if (CHECK(!skel, "skel_load", "lsm skeleton failed\n")) - goto close_prog; + int err; err = lsm__attach(skel); if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) - goto close_prog; + return err; + + /* Check that already linked program can't be attached again. */ + link = bpf_program__attach(skel->progs.test_int_hook); + if (CHECK(!IS_ERR(link), "attach_link", + "re-attach without detach should not succeed")) + return -1; err = exec_cmd(&skel->bss->monitored_pid); if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno)) - goto close_prog; + return err; CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n", skel->bss->bprm_count); @@ -77,7 +81,7 @@ void test_test_lsm(void) err = stack_mprotect(); if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n", errno)) - goto close_prog; + return err; CHECK(skel->bss->mprotect_count != 1, "mprotect_count", "mprotect_count = %d\n", skel->bss->mprotect_count); @@ -89,6 +93,30 @@ void test_test_lsm(void) CHECK(skel->bss->copy_test != 3, "copy_test", "copy_test = %d\n", skel->bss->copy_test); + lsm__detach(skel); + + skel->bss->copy_test = 0; + skel->bss->bprm_count = 0; + skel->bss->mprotect_count = 0; + return 0; +} + +void test_test_lsm(void) +{ + struct lsm *skel = NULL; + int err; + + skel = lsm__open_and_load(); + if (CHECK(!skel, "lsm_skel_load", "lsm skeleton failed\n")) + goto close_prog; + + err = test_lsm(skel); + if (CHECK(err, "test_lsm", "first attach failed\n")) + goto close_prog; + + err = test_lsm(skel); + CHECK(err, "test_lsm", "second attach failed\n"); + close_prog: lsm__destroy(skel); } From a1c05c3b09e0a92b26b94650837bf06c664beb1b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:46 +0200 Subject: [PATCH 05/19] selftests/bpf: Test that module can't be unloaded with attached trampoline Adding test to verify that once we attach module's trampoline, the module can't be unloaded. 
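A condensed sketch of what the new test asserts (it assumes the selftests' bpf_testmod module is loaded; the delete_module() wrapper mirrors the one added below, other details are elided):

  /* returns 0 on success, -errno on failure */
  static int try_unload(const char *name)
  {
          return syscall(__NR_delete_module, name, 0) ? -errno : 0;
  }

  /* while a fentry link into bpf_testmod is held, the trampoline pins a
   * module reference, so unloading should fail (typically -EBUSY)
   */
  link = bpf_program__attach(skel->progs.handle_fentry);
  err = try_unload("bpf_testmod");        /* expected: err < 0 */

  bpf_link__destroy(link);                /* drops the module reference */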
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20210414195147.1624932-7-jolsa@kernel.org --- .../selftests/bpf/prog_tests/module_attach.c | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index 5bc53d53d86e..d85a69b7ce44 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -45,12 +45,18 @@ static int trigger_module_test_write(int write_sz) return 0; } +static int delete_module(const char *name, int flags) +{ + return syscall(__NR_delete_module, name, flags); +} + void test_module_attach(void) { const int READ_SZ = 456; const int WRITE_SZ = 457; struct test_module_attach* skel; struct test_module_attach__bss *bss; + struct bpf_link *link; int err; skel = test_module_attach__open(); @@ -84,6 +90,23 @@ void test_module_attach(void) ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet"); ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret"); + test_module_attach__detach(skel); + + /* attach fentry/fexit and make sure it get's module reference */ + link = bpf_program__attach(skel->progs.handle_fentry); + if (!ASSERT_OK_PTR(link, "attach_fentry")) + goto cleanup; + + ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + bpf_link__destroy(link); + + link = bpf_program__attach(skel->progs.handle_fexit); + if (!ASSERT_OK_PTR(link, "attach_fexit")) + goto cleanup; + + ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + bpf_link__destroy(link); + cleanup: test_module_attach__destroy(skel); } From 7bb2cc19aee8f7150851bb8668c9ff655a5e7678 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 14 Apr 2021 21:51:47 +0200 Subject: [PATCH 06/19] selftests/bpf: Use ASSERT macros in lsm test Replacing CHECK with ASSERT macros. Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20210414195147.1624932-8-jolsa@kernel.org --- .../selftests/bpf/prog_tests/test_lsm.c | 27 +++++++------------ 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c index d492e76e01cf..244c01125126 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c +++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c @@ -18,8 +18,6 @@ char *CMD_ARGS[] = {"true", NULL}; #define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \ (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1)) -static int duration = 0; - int stack_mprotect(void) { void *buf; @@ -60,38 +58,33 @@ static int test_lsm(struct lsm *skel) int err; err = lsm__attach(skel); - if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) + if (!ASSERT_OK(err, "attach")) return err; /* Check that already linked program can't be attached again. 
*/ link = bpf_program__attach(skel->progs.test_int_hook); - if (CHECK(!IS_ERR(link), "attach_link", - "re-attach without detach should not succeed")) + if (!ASSERT_ERR_PTR(link, "attach_link")) return -1; err = exec_cmd(&skel->bss->monitored_pid); - if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno)) + if (!ASSERT_OK(err, "exec_cmd")) return err; - CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n", - skel->bss->bprm_count); + ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count"); skel->bss->monitored_pid = getpid(); err = stack_mprotect(); - if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n", - errno)) + if (!ASSERT_EQ(errno, EPERM, "stack_mprotect")) return err; - CHECK(skel->bss->mprotect_count != 1, "mprotect_count", - "mprotect_count = %d\n", skel->bss->mprotect_count); + ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count"); syscall(__NR_setdomainname, &buf, -2L); syscall(__NR_setdomainname, 0, -3L); syscall(__NR_setdomainname, ~0L, -4L); - CHECK(skel->bss->copy_test != 3, "copy_test", - "copy_test = %d\n", skel->bss->copy_test); + ASSERT_EQ(skel->bss->copy_test, 3, "copy_test"); lsm__detach(skel); @@ -107,15 +100,15 @@ void test_test_lsm(void) int err; skel = lsm__open_and_load(); - if (CHECK(!skel, "lsm_skel_load", "lsm skeleton failed\n")) + if (!ASSERT_OK_PTR(skel, "lsm_skel_load")) goto close_prog; err = test_lsm(skel); - if (CHECK(err, "test_lsm", "first attach failed\n")) + if (!ASSERT_OK(err, "test_lsm_first_attach")) goto close_prog; err = test_lsm(skel); - CHECK(err, "test_lsm", "second attach failed\n"); + ASSERT_OK(err, "test_lsm_second_attach"); close_prog: lsm__destroy(skel); From 7a2fa70aaffc2f8823feca22709a00f5c069a8a9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 26 Apr 2021 12:29:45 -0700 Subject: [PATCH 07/19] selftests/bpf: Add remaining ASSERT_xxx() variants Add ASSERT_TRUE/ASSERT_FALSE for conditions calculated with custom logic to true/false. Also add remaining arithmetical assertions: - ASSERT_LE -- less than or equal; - ASSERT_GT -- greater than; - ASSERT_GE -- greater than or equal. This should cover most scenarios where people fall back to error-prone CHECK()s. Also extend ASSERT_ERR() to print out errno, in addition to direct error. Also convert few CHECK() instances to ensure new ASSERT_xxx() variants work as expected. Subsequent patch will also use ASSERT_TRUE/ASSERT_FALSE more extensively. 
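As a usage sketch (illustrative values, not tied to one particular test), the kind of conversion these macros allow:

  /* before: condition, tag and message all spelled out by hand */
  if (CHECK(fd < 0, "cg_create", "fail: %d\n", fd))
          goto cleanup;
  CHECK_FAIL(!passed);

  /* after: the comparison, the expected value and errno reporting
   * come from the macro itself
   */
  if (!ASSERT_GE(fd, 0, "cg_create"))
          goto cleanup;
  ASSERT_TRUE(passed, "passed");
  if (!ASSERT_LE(ids[i - 1], ids[i], "sort_check"))
          return -1;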
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20210426192949.416837-2-andrii@kernel.org --- .../selftests/bpf/prog_tests/btf_dump.c | 2 +- .../selftests/bpf/prog_tests/btf_endian.c | 4 +- .../selftests/bpf/prog_tests/cgroup_link.c | 2 +- .../selftests/bpf/prog_tests/kfree_skb.c | 2 +- .../selftests/bpf/prog_tests/resolve_btfids.c | 7 +-- .../selftests/bpf/prog_tests/snprintf_btf.c | 4 +- tools/testing/selftests/bpf/test_progs.h | 50 ++++++++++++++++++- 7 files changed, 56 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index c60091ee8a21..5e129dc2073c 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -77,7 +77,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t) snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file); fd = mkstemp(out_file); - if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) { + if (!ASSERT_GE(fd, 0, "create_tmp")) { err = fd; goto done; } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c index 8c52d72c876e..8ab5d3e358dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -6,8 +6,6 @@ #include #include -static int duration = 0; - void test_btf_endian() { #if __BYTE_ORDER == __LITTLE_ENDIAN enum btf_endianness endian = BTF_LITTLE_ENDIAN; @@ -71,7 +69,7 @@ void test_btf_endian() { /* now modify original BTF */ var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1); - CHECK(var_id <= 0, "var_id", "failed %d\n", var_id); + ASSERT_GT(var_id, 0, "var_id"); btf__free(swap_btf); swap_btf = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c index 4d9b514b3fd9..736796e56ed1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -54,7 +54,7 @@ void test_cgroup_link(void) for (i = 0; i < cg_nr; i++) { cgs[i].fd = create_and_get_cgroup(cgs[i].path); - if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd)) + if (!ASSERT_GE(cgs[i].fd, 0, "cg_create")) goto cleanup; } diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 42c3a3103c26..d65107919998 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -134,7 +134,7 @@ void test_kfree_skb(void) /* make sure kfree_skb program was triggered * and it sent expected skb into ring buffer */ - CHECK_FAIL(!passed); + ASSERT_TRUE(passed, "passed"); err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); if (CHECK(err, "get_result", diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index 6ace5e9efec1..d3c2de2c24d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -160,11 +160,8 @@ int test_resolve_btfids(void) break; if (i > 0) { - ret = CHECK(test_set.ids[i - 1] > test_set.ids[i], - "sort_check", - "test_set is not sorted\n"); - if (ret) - break; + if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check")) + return -1; } } diff 
--git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c index 686b40f11a45..76e1f5fe18fa 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c @@ -42,9 +42,7 @@ void test_snprintf_btf(void) * and it set expected return values from bpf_trace_printk()s * and all tests ran. */ - if (CHECK(bss->ret <= 0, - "bpf_snprintf_btf: got return value", - "ret <= 0 %ld test %d\n", bss->ret, bss->ran_subtests)) + if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret")) goto cleanup; if (CHECK(bss->ran_subtests == 0, "check if subtests ran", diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index ee7e3b45182a..dda52cb649dc 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -130,6 +130,20 @@ extern int test__join_cgroup(const char *path); #define CHECK_ATTR(condition, tag, format...) \ _CHECK(condition, tag, tattr.duration, format) +#define ASSERT_TRUE(actual, name) ({ \ + static int duration = 0; \ + bool ___ok = (actual); \ + CHECK(!___ok, (name), "unexpected %s: got FALSE\n", (name)); \ + ___ok; \ +}) + +#define ASSERT_FALSE(actual, name) ({ \ + static int duration = 0; \ + bool ___ok = !(actual); \ + CHECK(!___ok, (name), "unexpected %s: got TRUE\n", (name)); \ + ___ok; \ +}) + #define ASSERT_EQ(actual, expected, name) ({ \ static int duration = 0; \ typeof(actual) ___act = (actual); \ @@ -163,6 +177,39 @@ extern int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_LE(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act <= ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld > expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_GT(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act > ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld <= expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_GE(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act >= ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld < expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + #define ASSERT_STREQ(actual, expected, name) ({ \ static int duration = 0; \ const char *___act = actual; \ @@ -178,7 +225,8 @@ extern int test__join_cgroup(const char *path); static int duration = 0; \ long long ___res = (res); \ bool ___ok = ___res == 0; \ - CHECK(!___ok, (name), "unexpected error: %lld\n", ___res); \ + CHECK(!___ok, (name), "unexpected error: %lld (errno %d)\n", \ + ___res, errno); \ ___ok; \ }) From 6709a914c8498f42b1498b3d31f4b078d092fd35 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 26 Apr 2021 12:29:46 -0700 Subject: [PATCH 08/19] libbpf: Support BTF_KIND_FLOAT during type compatibility checks in CO-RE Add BTF_KIND_FLOAT support when doing CO-RE field type compatibility check. Without this, relocations against float/double fields will fail. Also adjust one error message to emit instruction index instead of less convenient instruction byte offset. 
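A hedged BPF-side sketch of what this unblocks (struct and field names are hypothetical): a CO-RE field relocation whose target member is a float. BPF has no floating-point instructions, so the program only copies the raw bytes, but libbpf's field-compatibility check still has to accept FLOAT as a target kind for the relocation to resolve:

  struct sample___kernel {
          double load;            /* matched against the kernel's BTF */
  } __attribute__((preserve_access_index));

  static __always_inline __u64 read_load_bits(struct sample___kernel *s)
  {
          __u64 bits;

          /* CO-RE relocated offset; the value is read as raw bytes */
          bpf_probe_read_kernel(&bits, sizeof(bits), &s->load);
          return bits;
  }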
Fixes: 22541a9eeb0d ("libbpf: Add BTF_KIND_FLOAT support") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20210426192949.416837-3-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1cddd17af7d..e2a3cf437814 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5115,6 +5115,7 @@ err_out: * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * - everything else shouldn't be ever a target of relocation. @@ -5141,6 +5142,7 @@ recur: switch (btf_kind(local_type)) { case BTF_KIND_PTR: + case BTF_KIND_FLOAT: return 1; case BTF_KIND_FWD: case BTF_KIND_ENUM: { @@ -6245,8 +6247,8 @@ patch_insn: /* bpf_core_patch_insn() should know how to handle missing targ_spec */ err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res); if (err) { - pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n", - prog->name, relo_idx, relo->insn_off, err); + pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n", + prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err); return -EINVAL; } From 0f20615d64ee2ad5e2a133a812382d0c4071589b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 26 Apr 2021 12:29:47 -0700 Subject: [PATCH 09/19] selftests/bpf: Fix BPF_CORE_READ_BITFIELD() macro Fix BPF_CORE_READ_BITFIELD() macro used for reading CO-RE-relocatable bitfields. Missing breaks in a switch caused 8-byte reads always. This can confuse libbpf because it does strict checks that memory load size corresponds to the original size of the field, which in this case quite often would be wrong. After fixing that, we run into another problem, which quite subtle, so worth documenting here. The issue is in Clang optimization and CO-RE relocation interactions. Without that asm volatile construct (also known as barrier_var()), Clang will re-order BYTE_OFFSET and BYTE_SIZE relocations and will apply BYTE_OFFSET 4 times for each switch case arm. This will result in the same error from libbpf about mismatch of memory load size and original field size. I.e., if we were reading u32, we'd still have *(u8 *), *(u16 *), *(u32 *), and *(u64 *) memory loads, three of which will fail. Using barrier_var() forces Clang to apply BYTE_OFFSET relocation first (and once) to calculate p, after which value of p is used without relocation in each of switch case arms, doing appropiately-sized memory load. Here's the list of relevant relocations and pieces of generated BPF code before and after this patch for test_core_reloc_bitfields_direct selftests. 
BEFORE ===== #45: core_reloc: insn #160 --> [5] + 0:5: byte_sz --> struct core_reloc_bitfields.u32 #46: core_reloc: insn #167 --> [5] + 0:5: byte_off --> struct core_reloc_bitfields.u32 #47: core_reloc: insn #174 --> [5] + 0:5: byte_off --> struct core_reloc_bitfields.u32 #48: core_reloc: insn #178 --> [5] + 0:5: byte_off --> struct core_reloc_bitfields.u32 #49: core_reloc: insn #182 --> [5] + 0:5: byte_off --> struct core_reloc_bitfields.u32 157: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0 ll 159: 7b 12 20 01 00 00 00 00 *(u64 *)(r2 + 288) = r1 160: b7 02 00 00 04 00 00 00 r2 = 4 ; BYTE_SIZE relocation here ^^^ 161: 66 02 07 00 03 00 00 00 if w2 s> 3 goto +7 162: 16 02 0d 00 01 00 00 00 if w2 == 1 goto +13 163: 16 02 01 00 02 00 00 00 if w2 == 2 goto +1 164: 05 00 12 00 00 00 00 00 goto +18 0000000000000528 : 165: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll 167: 69 11 08 00 00 00 00 00 r1 = *(u16 *)(r1 + 8) ; BYTE_OFFSET relo here w/ WRONG size ^^^^^^^^^^^^^^^^ 168: 05 00 0e 00 00 00 00 00 goto +14 0000000000000548 : 169: 16 02 0a 00 04 00 00 00 if w2 == 4 goto +10 170: 16 02 01 00 08 00 00 00 if w2 == 8 goto +1 171: 05 00 0b 00 00 00 00 00 goto +11 0000000000000560 : 172: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll 174: 79 11 08 00 00 00 00 00 r1 = *(u64 *)(r1 + 8) ; BYTE_OFFSET relo here w/ WRONG size ^^^^^^^^^^^^^^^^ 175: 05 00 07 00 00 00 00 00 goto +7 0000000000000580 : 176: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll 178: 71 11 08 00 00 00 00 00 r1 = *(u8 *)(r1 + 8) ; BYTE_OFFSET relo here w/ WRONG size ^^^^^^^^^^^^^^^^ 179: 05 00 03 00 00 00 00 00 goto +3 00000000000005a0 : 180: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll 182: 61 11 08 00 00 00 00 00 r1 = *(u32 *)(r1 + 8) ; BYTE_OFFSET relo here w/ RIGHT size ^^^^^^^^^^^^^^^^ 00000000000005b8 : 183: 67 01 00 00 20 00 00 00 r1 <<= 32 184: b7 02 00 00 00 00 00 00 r2 = 0 185: 16 02 02 00 00 00 00 00 if w2 == 0 goto +2 186: c7 01 00 00 20 00 00 00 r1 s>>= 32 187: 05 00 01 00 00 00 00 00 goto +1 00000000000005e0 : 188: 77 01 00 00 20 00 00 00 r1 >>= 32 AFTER ===== #30: core_reloc: insn #132 --> [5] + 0:5: byte_off --> struct core_reloc_bitfields.u32 #31: core_reloc: insn #134 --> [5] + 0:5: byte_sz --> struct core_reloc_bitfields.u32 129: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0 ll 131: 7b 12 20 01 00 00 00 00 *(u64 *)(r2 + 288) = r1 132: b7 01 00 00 08 00 00 00 r1 = 8 ; BYTE_OFFSET relo here ^^^ ; no size check for non-memory dereferencing instructions 133: 0f 12 00 00 00 00 00 00 r2 += r1 134: b7 03 00 00 04 00 00 00 r3 = 4 ; BYTE_SIZE relocation here ^^^ 135: 66 03 05 00 03 00 00 00 if w3 s> 3 goto +5 136: 16 03 09 00 01 00 00 00 if w3 == 1 goto +9 137: 16 03 01 00 02 00 00 00 if w3 == 2 goto +1 138: 05 00 0a 00 00 00 00 00 goto +10 0000000000000458 : 139: 69 21 00 00 00 00 00 00 r1 = *(u16 *)(r2 + 0) ; NO CO-RE relocation here ^^^^^^^^^^^^^^^^ 140: 05 00 08 00 00 00 00 00 goto +8 0000000000000468 : 141: 16 03 06 00 04 00 00 00 if w3 == 4 goto +6 142: 16 03 01 00 08 00 00 00 if w3 == 8 goto +1 143: 05 00 05 00 00 00 00 00 goto +5 0000000000000480 : 144: 79 21 00 00 00 00 00 00 r1 = *(u64 *)(r2 + 0) ; NO CO-RE relocation here ^^^^^^^^^^^^^^^^ 145: 05 00 03 00 00 00 00 00 goto +3 0000000000000490 : 146: 71 21 00 00 00 00 00 00 r1 = *(u8 *)(r2 + 0) ; NO CO-RE relocation here ^^^^^^^^^^^^^^^^ 147: 05 00 01 00 00 00 00 00 goto +1 00000000000004a0 : 148: 61 21 00 00 00 00 00 00 r1 = *(u32 *)(r2 + 0) ; NO CO-RE relocation here ^^^^^^^^^^^^^^^^ 00000000000004a8 
: 149: 67 01 00 00 20 00 00 00 r1 <<= 32 150: b7 02 00 00 00 00 00 00 r2 = 0 151: 16 02 02 00 00 00 00 00 if w2 == 0 goto +2 152: c7 01 00 00 20 00 00 00 r1 s>>= 32 153: 05 00 01 00 00 00 00 00 goto +1 00000000000004d0 : 154: 77 01 00 00 20 00 00 00 r1 >>= 323 Fixes: ee26dade0e3b ("libbpf: Add support for relocatable bitfields") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20210426192949.416837-4-andrii@kernel.org --- tools/lib/bpf/bpf_core_read.h | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index 53b3e199fb25..09ebe3db5f2f 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -88,11 +88,19 @@ enum bpf_enum_value_kind { const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ unsigned long long val; \ \ + /* This is a so-called barrier_var() operation that makes specified \ + * variable "a black box" for optimizing compiler. \ + * It forces compiler to perform BYTE_OFFSET relocation on p and use \ + * its calculated value in the switch below, instead of applying \ + * the same relocation 4 times for each individual memory load. \ + */ \ + asm volatile("" : "=r"(p) : "0"(p)); \ + \ switch (__CORE_RELO(s, field, BYTE_SIZE)) { \ - case 1: val = *(const unsigned char *)p; \ - case 2: val = *(const unsigned short *)p; \ - case 4: val = *(const unsigned int *)p; \ - case 8: val = *(const unsigned long long *)p; \ + case 1: val = *(const unsigned char *)p; break; \ + case 2: val = *(const unsigned short *)p; break; \ + case 4: val = *(const unsigned int *)p; break; \ + case 8: val = *(const unsigned long long *)p; break; \ } \ val <<= __CORE_RELO(s, field, LSHIFT_U64); \ if (__CORE_RELO(s, field, SIGNED)) \ From 5a30eb23922b52f33222c6729b6b3ff1c37a6c66 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 26 Apr 2021 12:29:48 -0700 Subject: [PATCH 10/19] selftests/bpf: Fix field existence CO-RE reloc tests Negative field existence cases for have a broken assumption that FIELD_EXISTS CO-RE relo will fail for fields that match the name but have incompatible type signature. That's not how CO-RE relocations generally behave. Types and fields that match by name but not by expected type are treated as non-matching candidates and are skipped. Error later is reported if no matching candidate was found. That's what happens for most relocations, but existence relocations (FIELD_EXISTS and TYPE_EXISTS) are more permissive and they are designed to return 0 or 1, depending if a match is found. This allows to handle name-conflicting but incompatible types in BPF code easily. Combined with ___flavor suffixes, it's possible to handle pretty much any structural type changes in kernel within the compiled once BPF source code. So, long story short, negative field existence test cases are invalid in their assumptions, so this patch reworks them into a single consolidated positive case that doesn't match any of the fields. 
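To make this concrete, a sketch of the pattern the permissive behavior supports (type and field names are illustrative): two ___flavor variants of one struct with same-named but incompatible fields, chosen at load time with field-existence checks instead of failing the relocation:

  struct pkt_md___old {
          int handle;
  } __attribute__((preserve_access_index));

  struct pkt_md___new {
          void *handle;           /* same name, incompatible type */
  } __attribute__((preserve_access_index));

  static __always_inline __u64 get_handle(void *md)
  {
          struct pkt_md___new *n = md;
          struct pkt_md___old *o = md;

          /* FIELD_EXISTS evaluates to 0 or 1 depending on which flavor
           * matches the running kernel; neither branch fails the load
           */
          if (bpf_core_field_exists(n->handle))
                  return (__u64)BPF_CORE_READ(n, handle);
          if (bpf_core_field_exists(o->handle))
                  return BPF_CORE_READ(o, handle);
          return 0;
  }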
Fixes: c7566a69695c ("selftests/bpf: Add field existence CO-RE relocs tests") Reported-by: Lorenz Bauer Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20210426192949.416837-5-andrii@kernel.org --- .../selftests/bpf/prog_tests/core_reloc.c | 31 ++++++++++++------- ...ore_reloc_existence___err_wrong_arr_kind.c | 3 -- ...loc_existence___err_wrong_arr_value_type.c | 3 -- ...ore_reloc_existence___err_wrong_int_kind.c | 3 -- ..._core_reloc_existence___err_wrong_int_sz.c | 3 -- ...ore_reloc_existence___err_wrong_int_type.c | 3 -- ..._reloc_existence___err_wrong_struct_type.c | 3 -- ..._core_reloc_existence___wrong_field_defs.c | 3 ++ .../selftests/bpf/progs/core_reloc_types.h | 20 ++---------- 9 files changed, 24 insertions(+), 48 deletions(-) delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index d94dcead72e6..385fd7696a2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -210,11 +210,6 @@ static int duration = 0; .bpf_obj_file = "test_core_reloc_existence.o", \ .btf_src_file = "btf__core_reloc_" #name ".o" \ -#define FIELD_EXISTS_ERR_CASE(name) { \ - FIELD_EXISTS_CASE_COMMON(name), \ - .fails = true, \ -} - #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \ .case_name = test_name_prefix#name, \ .bpf_obj_file = objfile, \ @@ -643,13 +638,25 @@ static struct core_reloc_test_case test_cases[] = { }, .output_len = sizeof(struct core_reloc_existence_output), }, - - FIELD_EXISTS_ERR_CASE(existence__err_int_sz), - FIELD_EXISTS_ERR_CASE(existence__err_int_type), - FIELD_EXISTS_ERR_CASE(existence__err_int_kind), - FIELD_EXISTS_ERR_CASE(existence__err_arr_kind), - FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type), - FIELD_EXISTS_ERR_CASE(existence__err_struct_type), + { + FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) { + }, + .input_len = sizeof(struct core_reloc_existence___wrong_field_defs), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 0, + .b_exists = 0, + .c_exists = 0, + .arr_exists = 0, + .s_exists = 0, + .a_value = 0xff000001u, + .b_value = 0xff000002u, + .c_value = 0xff000003u, + .arr_value = 0xff000004u, + .s_value = 0xff000005u, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, /* bitfield relocation checks */ BITFIELDS_CASE(bitfields, { diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c deleted file mode 100644 index dd0ffa518f36..000000000000 --- 
a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_arr_kind x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c deleted file mode 100644 index bc83372088ad..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_arr_value_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c deleted file mode 100644 index 917bec41be08..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_int_kind x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c deleted file mode 100644 index 6ec7e6ec1c91..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_int_sz x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c deleted file mode 100644 index 7bbcacf2b0d1..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_int_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c deleted file mode 100644 index f384dd38ec70..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_existence___err_wrong_struct_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c new file mode 100644 index 000000000000..d14b496190c3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_existence___wrong_field_defs x) {} diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index 9982eb969048..c95c0cabe951 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -700,27 +700,11 @@ struct core_reloc_existence___minimal { int a; }; -struct core_reloc_existence___err_wrong_int_sz { - short a; -}; - -struct core_reloc_existence___err_wrong_int_type { +struct core_reloc_existence___wrong_field_defs { + void *a; int b[1]; -}; - -struct core_reloc_existence___err_wrong_int_kind { 
struct{ int x; } c; -}; - -struct core_reloc_existence___err_wrong_arr_kind { int arr; -}; - -struct core_reloc_existence___err_wrong_arr_value_type { - short arr[1]; -}; - -struct core_reloc_existence___err_wrong_struct_type { int s; }; From bede0ebf0be87e9678103486a77f39e0334c6791 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 26 Apr 2021 12:29:49 -0700 Subject: [PATCH 11/19] selftests/bpf: Fix core_reloc test runner Fix failed tests checks in core_reloc test runner, which allowed failing tests to pass quietly. Also add extra check to make sure that expected to fail test cases with invalid names are caught as test failure anyway, as this is not an expected failure mode. Also fix mislabeled probed vs direct bitfield test cases. Fixes: 124a892d1c41 ("selftests/bpf: Test TYPE_EXISTS and TYPE_SIZE CO-RE relocations") Reported-by: Lorenz Bauer Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20210426192949.416837-6-andrii@kernel.org --- .../selftests/bpf/prog_tests/core_reloc.c | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 385fd7696a2e..607710826dca 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -217,7 +217,7 @@ static int duration = 0; #define BITFIELDS_CASE(name, ...) { \ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ - "direct:", name), \ + "probed:", name), \ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ .input_len = sizeof(struct core_reloc_##name), \ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ @@ -225,7 +225,7 @@ static int duration = 0; .output_len = sizeof(struct core_reloc_bitfields_output), \ }, { \ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ - "probed:", name), \ + "direct:", name), \ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ .input_len = sizeof(struct core_reloc_##name), \ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ @@ -546,8 +546,7 @@ static struct core_reloc_test_case test_cases[] = { ARRAYS_ERR_CASE(arrays___err_too_small), ARRAYS_ERR_CASE(arrays___err_too_shallow), ARRAYS_ERR_CASE(arrays___err_non_array), - ARRAYS_ERR_CASE(arrays___err_wrong_val_type1), - ARRAYS_ERR_CASE(arrays___err_wrong_val_type2), + ARRAYS_ERR_CASE(arrays___err_wrong_val_type), ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr), /* enum/ptr/int handling scenarios */ @@ -865,13 +864,20 @@ void test_core_reloc(void) "prog '%s' not found\n", probe_name)) goto cleanup; + + if (test_case->btf_src_file) { + err = access(test_case->btf_src_file, R_OK); + if (!ASSERT_OK(err, "btf_src_file")) + goto cleanup; + } + load_attr.obj = obj; load_attr.log_level = 0; load_attr.target_btf_path = test_case->btf_src_file; err = bpf_object__load_xattr(&load_attr); if (err) { if (!test_case->fails) - CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err); + ASSERT_OK(err, "obj_load"); goto cleanup; } @@ -910,10 +916,8 @@ void test_core_reloc(void) goto cleanup; } - if (test_case->fails) { - CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name); + if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail")) goto cleanup; - } equal = memcmp(data->out, test_case->output, test_case->output_len) == 0; From 38d26d89b31d0766d431471572cc9b007ca19c98 Mon Sep 17 00:00:00 2001 From: Florent 
Revest Date: Tue, 27 Apr 2021 13:29:58 +0200 Subject: [PATCH 12/19] bpf: Lock bpf_trace_printk's tmp buf before it is written to bpf_trace_printk uses a shared static buffer to hold strings before they are printed. A recent refactoring moved the locking of that buffer after it gets filled by mistake. Fixes: d9c9e4db186a ("bpf: Factorize bpf_trace_printk and bpf_seq_printf") Reported-by: Rasmus Villemoes Signed-off-by: Florent Revest Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20210427112958.773132-1-revest@chromium.org --- kernel/trace/bpf_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2a8bcdc927c7..0e67d12a8f40 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -391,13 +391,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, if (ret < 0) return ret; + raw_spin_lock_irqsave(&trace_printk_lock, flags); ret = snprintf(buf, sizeof(buf), fmt, BPF_CAST_FMT_ARG(0, args, mod), BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod)); /* snprintf() will not append null for zero-length strings */ if (ret == 0) buf[0] = '\0'; - raw_spin_lock_irqsave(&trace_printk_lock, flags); trace_bpf_trace_printk(buf); raw_spin_unlock_irqrestore(&trace_printk_lock, flags); From 10bf4e83167cc68595b85fd73bb91e8f2c086e36 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 23 Apr 2021 13:59:55 +0000 Subject: [PATCH 13/19] bpf: Fix propagation of 32 bit unsigned bounds from 64 bit bounds Similarly as b02709587ea3 ("bpf: Fix propagation of 32-bit signed bounds from 64-bit bounds."), we also need to fix the propagation of 32 bit unsigned bounds from 64 bit counterparts. That is, really only set the u32_{min,max}_value when /both/ {umin,umax}_value safely fit in 32 bit space. For example, the register with a umin_value == 1 does /not/ imply that u32_min_value is also equal to 1, since umax_value could be much larger than 32 bit subregister can hold, and thus u32_min_value is in the interval [0,1] instead. Before fix, invalid tracking result of R2_w=inv1: [...] 5: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0) R10=fp0 5: (35) if r2 >= 0x1 goto pc+1 [...] // goto path 7: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,umin_value=1) R10=fp0 7: (b6) if w2 <= 0x1 goto pc+1 [...] // goto path 9: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,smin_value=-9223372036854775807,smax_value=9223372032559808513,umin_value=1,umax_value=18446744069414584321,var_off=(0x1; 0xffffffff00000000),s32_min_value=1,s32_max_value=1,u32_max_value=1) R10=fp0 9: (bc) w2 = w2 10: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv1 R10=fp0 [...] After fix, correct tracking result of R2_w=inv(id=0,umax_value=1,var_off=(0x0; 0x1)): [...] 5: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0) R10=fp0 5: (35) if r2 >= 0x1 goto pc+1 [...] // goto path 7: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,umin_value=1) R10=fp0 7: (b6) if w2 <= 0x1 goto pc+1 [...] // goto path 9: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,smax_value=9223372032559808513,umax_value=18446744069414584321,var_off=(0x0; 0xffffffff00000001),s32_min_value=0,s32_max_value=1,u32_max_value=1) R10=fp0 9: (bc) w2 = w2 10: R0=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,umax_value=1,var_off=(0x0; 0x1)) R10=fp0 [...] Thus, same issue as in b02709587ea3 holds for unsigned subregister tracking. Also, align __reg64_bound_u32() similarly to __reg64_bound_s32() as done in b02709587ea3 to make them uniform again. 
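A small worked example (ordinary userspace C, purely to illustrate the reasoning) of why umin_value alone must not be copied into u32_min_value:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t v = 0x100000000ULL;   /* satisfies umin_value == 1 */
          uint32_t w = (uint32_t)v;      /* the 32-bit subregister view */

          /* prints 0: the low 32 bits can be smaller than umin_value,
           * so u32_min_value may only be tightened when umax_value also
           * fits in 32 bits
           */
          printf("%u\n", w);
          return 0;
  }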
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") Reported-by: Manfred Paul (@_manfp) Signed-off-by: Daniel Borkmann Reviewed-by: John Fastabend Acked-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 8 +++----- tools/testing/selftests/bpf/verifier/array_access.c | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 637462e9b6ee..9145f88b2a0a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1398,9 +1398,7 @@ static bool __reg64_bound_s32(s64 a) static bool __reg64_bound_u32(u64 a) { - if (a > U32_MIN && a < U32_MAX) - return true; - return false; + return a > U32_MIN && a < U32_MAX; } static void __reg_combine_64_into_32(struct bpf_reg_state *reg) @@ -1411,10 +1409,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg) reg->s32_min_value = (s32)reg->smin_value; reg->s32_max_value = (s32)reg->smax_value; } - if (__reg64_bound_u32(reg->umin_value)) + if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) { reg->u32_min_value = (u32)reg->umin_value; - if (__reg64_bound_u32(reg->umax_value)) reg->u32_max_value = (u32)reg->umax_value; + } /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c index 1b138cd2b187..1b1c798e9248 100644 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ b/tools/testing/selftests/bpf/verifier/array_access.c @@ -186,7 +186,7 @@ }, .fixup_map_hash_48b = { 3 }, .errstr_unpriv = "R0 leaks addr", - .errstr = "invalid access to map value, value_size=48 off=44 size=8", + .errstr = "R0 unbounded memory access", .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, From bb0247807744dc93407771e13ba20af0b270ca6a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 23 Apr 2021 11:27:27 +0200 Subject: [PATCH 14/19] bpf, cpumap: Bulk skb using netif_receive_skb_list Rely on netif_receive_skb_list routine to send skbs converted from xdp_frames in cpu_map_kthread_run in order to improve i-cache usage. The proposed patch has been tested running xdp_redirect_cpu bpf sample available in the kernel tree that is used to redirect UDP frames from ixgbe driver to a cpumap entry and then to the networking stack. UDP frames are generated using pktgen. Packets are discarded by the UDP layer. 
$ xdp_redirect_cpu --cpu --progname xdp_cpu_map0 --dev bpf-next: ~2.35Mpps bpf-next + cpumap skb-list: ~2.72Mpps Rename drops counter in kmem_alloc_drops since now it reports just kmem_cache_alloc_bulk failures Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/c729f83e5d7482d9329e0f165bdbe5adcefd1510.1619169700.git.lorenzo@kernel.org --- kernel/bpf/cpumap.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 0cf2791d5099..5dd3e866599a 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -27,7 +27,7 @@ #include #include -#include /* netif_receive_skb_core */ +#include /* netif_receive_skb_list */ #include /* eth_type_trans */ /* General idea: XDP packets getting XDP redirected to another CPU, @@ -252,11 +252,12 @@ static int cpu_map_kthread_run(void *data) */ while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { struct xdp_cpumap_stats stats = {}; /* zero stats */ + unsigned int kmem_alloc_drops = 0, sched = 0; gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; - unsigned int drops = 0, sched = 0; void *frames[CPUMAP_BATCH]; void *skbs[CPUMAP_BATCH]; int i, n, m, nframes; + LIST_HEAD(list); /* Release CPU reschedule checks */ if (__ptr_ring_empty(rcpu->queue)) { @@ -297,7 +298,7 @@ static int cpu_map_kthread_run(void *data) if (unlikely(m == 0)) { for (i = 0; i < nframes; i++) skbs[i] = NULL; /* effect: xdp_return_frame */ - drops += nframes; + kmem_alloc_drops += nframes; } } @@ -305,7 +306,6 @@ static int cpu_map_kthread_run(void *data) for (i = 0; i < nframes; i++) { struct xdp_frame *xdpf = frames[i]; struct sk_buff *skb = skbs[i]; - int ret; skb = __xdp_build_skb_from_frame(xdpf, skb, xdpf->dev_rx); @@ -314,13 +314,13 @@ static int cpu_map_kthread_run(void *data) continue; } - /* Inject into network stack */ - ret = netif_receive_skb_core(skb); - if (ret == NET_RX_DROP) - drops++; + list_add_tail(&skb->list, &list); } + netif_receive_skb_list(&list); + /* Feedback loop via tracepoint */ - trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats); + trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, + sched, &stats); local_bh_enable(); /* resched point, may call do_softirq() */ } From 2551c2d19c04cd1c7b6c99ec04a8ff08193b0ccc Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Sat, 24 Apr 2021 10:12:08 +0800 Subject: [PATCH 15/19] bpf, docs: Fix literal block for example code Add a missing colon so that the code block followed can be rendered properly. 
Signed-off-by: Hengqi Chen Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20210424021208.832116-1-hengqi.chen@gmail.com --- Documentation/networking/filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst index 251c6bd73d15..3e2221f4abe4 100644 --- a/Documentation/networking/filter.rst +++ b/Documentation/networking/filter.rst @@ -327,7 +327,7 @@ Examples for low-level BPF: ret #-1 drop: ret #0 -**icmp random packet sampling, 1 in 4**: +**icmp random packet sampling, 1 in 4**:: ldh [12] jne #0x800, drop From 76d6a13383b8e3ff20a9cf52aa9c3de39e485632 Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Tue, 27 Apr 2021 19:43:12 +0200 Subject: [PATCH 16/19] seq_file: Add a seq_bprintf function Similarly to seq_buf_bprintf in lib/seq_buf.c, this function writes a printf formatted string with arguments provided in a "binary representation" built by functions such as vbin_printf. Signed-off-by: Florent Revest Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20210427174313.860948-2-revest@chromium.org --- fs/seq_file.c | 18 ++++++++++++++++++ include/linux/seq_file.h | 4 ++++ 2 files changed, 22 insertions(+) diff --git a/fs/seq_file.c b/fs/seq_file.c index cb11a34fb871..5059248f2d64 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -412,6 +412,24 @@ void seq_printf(struct seq_file *m, const char *f, ...) } EXPORT_SYMBOL(seq_printf); +#ifdef CONFIG_BINARY_PRINTF +void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary) +{ + int len; + + if (m->count < m->size) { + len = bstr_printf(m->buf + m->count, m->size - m->count, f, + binary); + if (m->count + len < m->size) { + m->count += len; + return; + } + } + seq_set_overflow(m); +} +EXPORT_SYMBOL(seq_bprintf); +#endif /* CONFIG_BINARY_PRINTF */ + /** * mangle_path - mangle and copy path to buffer beginning * @s: buffer start diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index b83b3ae3c877..723b1fa1177e 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -146,6 +146,10 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); +#ifdef CONFIG_BINARY_PRINTF +void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary); +#endif + #define DEFINE_SEQ_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ From 48cac3f4a96ddf08df8e53809ed066de0dc93915 Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Tue, 27 Apr 2021 19:43:13 +0200 Subject: [PATCH 17/19] bpf: Implement formatted output helpers with bstr_printf BPF has three formatted output helpers: bpf_trace_printk, bpf_seq_printf and bpf_snprintf. Their signatures specify that all arguments are provided from the BPF world as u64s (in an array or as registers). All of these helpers are currently implemented by calling functions such as snprintf() whose signatures take a variable number of arguments, then placed in a va_list by the compiler to call vsnprintf(). "d9c9e4db bpf: Factorize bpf_trace_printk and bpf_seq_printf" introduced a bpf_printf_prepare function that fills an array of u64 sanitized arguments with an array of "modifiers" which indicate what the "real" size of each argument should be (given by the format specifier). The BPF_CAST_FMT_ARG macro consumes these arrays and casts each argument to its real size. 
However, the C promotion rules implicitely cast them all back to u64s. Therefore, the arguments given to snprintf are u64s and the va_list constructed by the compiler will use 64 bits for each argument. On 64 bit machines, this happens to work well because 32 bit arguments in va_lists need to occupy 64 bits anyway, but on 32 bit architectures this breaks the layout of the va_list expected by the called function and mangles values. In "88a5c690b6 bpf: fix bpf_trace_printk on 32 bit archs", this problem had been solved for bpf_trace_printk only with a "horrid workaround" that emitted multiple calls to trace_printk where each call had different argument types and generated different va_list layouts. One of the call would be dynamically chosen at runtime. This was ok with the 3 arguments that bpf_trace_printk takes but bpf_seq_printf and bpf_snprintf accept up to 12 arguments. Because this approach scales code exponentially, it is not a viable option anymore. Because the promotion rules are part of the language and because the construction of a va_list is an arch-specific ABI, it's best to just avoid variadic arguments and va_lists altogether. Thankfully the kernel's snprintf() has an alternative in the form of bstr_printf() that accepts arguments in a "binary buffer representation". These binary buffers are currently created by vbin_printf and used in the tracing subsystem to split the cost of printing into two parts: a fast one that only dereferences and remembers values, and a slower one, called later, that does the pretty-printing. This patch refactors bpf_printf_prepare to construct binary buffers of arguments consumable by bstr_printf() instead of arrays of arguments and modifiers. This gets rid of BPF_CAST_FMT_ARG and greatly simplifies the bpf_printf_prepare usage but there are a few gotchas that change how bpf_printf_prepare needs to do things. Currently, bpf_printf_prepare uses a per cpu temporary buffer as a generic storage for strings and IP addresses. With this refactoring, the temporary buffers now holds all the arguments in a structured binary format. To comply with the format expected by bstr_printf, certain format specifiers also need to be pre-formatted: %pB and %pi6/%pi4/%pI4/%pI6. Because vsnprintf subroutines for these specifiers are hard to expose, we pre-format these arguments with calls to snprintf(). Reported-by: Rasmus Villemoes Signed-off-by: Florent Revest Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20210427174313.860948-3-revest@chromium.org --- include/linux/bpf.h | 22 +---- init/Kconfig | 1 + kernel/bpf/helpers.c | 188 +++++++++++++++++++++------------------ kernel/bpf/verifier.c | 2 +- kernel/trace/bpf_trace.c | 32 ++----- 5 files changed, 114 insertions(+), 131 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ad4bcf1cadbb..b33f199c4cc2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2081,24 +2081,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id); -enum bpf_printf_mod_type { - BPF_PRINTF_INT, - BPF_PRINTF_LONG, - BPF_PRINTF_LONG_LONG, -}; - -/* Workaround for getting va_list handling working with different argument type - * combinations generically for 32 and 64 bit archs. - */ -#define BPF_CAST_FMT_ARG(arg_nb, args, mod) \ - (mod[arg_nb] == BPF_PRINTF_LONG_LONG || \ - (mod[arg_nb] == BPF_PRINTF_LONG && __BITS_PER_LONG == 64) \ - ? 
(u64)args[arg_nb] \ - : (u32)args[arg_nb]) - -int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, - u64 *final_args, enum bpf_printf_mod_type *mod, - u32 num_args); -void bpf_printf_cleanup(void); +int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, + u32 **bin_buf, u32 num_args); +void bpf_bprintf_cleanup(void); #endif /* _LINUX_BPF_H */ diff --git a/init/Kconfig b/init/Kconfig index 5deae45b8d81..0d82a1f838cc 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1708,6 +1708,7 @@ config BPF_SYSCALL select BPF select IRQ_WORK select TASKS_TRACE_RCU + select BINARY_PRINTF select NET_SOCK_MSG if INET default n help diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 85b26ca5aacd..544773970dbc 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -707,9 +707,6 @@ static int try_get_fmt_tmp_buf(char **tmp_buf) struct bpf_printf_buf *bufs; int used; - if (*tmp_buf) - return 0; - preempt_disable(); used = this_cpu_inc_return(bpf_printf_buf_used); if (WARN_ON_ONCE(used > 1)) { @@ -723,7 +720,7 @@ static int try_get_fmt_tmp_buf(char **tmp_buf) return 0; } -void bpf_printf_cleanup(void) +void bpf_bprintf_cleanup(void) { if (this_cpu_read(bpf_printf_buf_used)) { this_cpu_dec(bpf_printf_buf_used); @@ -732,43 +729,45 @@ void bpf_printf_cleanup(void) } /* - * bpf_parse_fmt_str - Generic pass on format strings for printf-like helpers + * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers * * Returns a negative value if fmt is an invalid format string or 0 otherwise. * * This can be used in two ways: - * - Format string verification only: when final_args and mod are NULL + * - Format string verification only: when bin_args is NULL * - Arguments preparation: in addition to the above verification, it writes in - * final_args a copy of raw_args where pointers from BPF have been sanitized - * into pointers safe to use by snprintf. This also writes in the mod array - * the size requirement of each argument, usable by BPF_CAST_FMT_ARG for ex. + * bin_args a binary representation of arguments usable by bstr_printf where + * pointers from BPF have been sanitized. * * In argument preparation mode, if 0 is returned, safe temporary buffers are - * allocated and bpf_printf_cleanup should be called to free them after use. + * allocated and bpf_bprintf_cleanup should be called to free them after use. 
*/ -int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, - u64 *final_args, enum bpf_printf_mod_type *mod, - u32 num_args) +int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, + u32 **bin_args, u32 num_args) { - char *unsafe_ptr = NULL, *tmp_buf = NULL, *fmt_end; - size_t tmp_buf_len = MAX_PRINTF_BUF_LEN; - int err, i, num_spec = 0, copy_size; - enum bpf_printf_mod_type cur_mod; + char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; + size_t sizeof_cur_arg, sizeof_cur_ip; + int err, i, num_spec = 0; u64 cur_arg; - char fmt_ptype; - - if (!!final_args != !!mod) - return -EINVAL; + char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; fmt_end = strnchr(fmt, fmt_size, 0); if (!fmt_end) return -EINVAL; fmt_size = fmt_end - fmt; + if (bin_args) { + if (num_args && try_get_fmt_tmp_buf(&tmp_buf)) + return -EBUSY; + + tmp_buf_end = tmp_buf + MAX_PRINTF_BUF_LEN; + *bin_args = (u32 *)tmp_buf; + } + for (i = 0; i < fmt_size; i++) { if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { err = -EINVAL; - goto cleanup; + goto out; } if (fmt[i] != '%') @@ -781,7 +780,7 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, if (num_spec >= num_args) { err = -EINVAL; - goto cleanup; + goto out; } /* The string is zero-terminated so if fmt[i] != 0, we can @@ -800,7 +799,7 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, } if (fmt[i] == 'p') { - cur_mod = BPF_PRINTF_LONG; + sizeof_cur_arg = sizeof(long); if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && fmt[i + 2] == 's') { @@ -811,117 +810,140 @@ int bpf_printf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || - fmt[i + 1] == 'x' || fmt[i + 1] == 'B' || - fmt[i + 1] == 's' || fmt[i + 1] == 'S') { + fmt[i + 1] == 'x' || fmt[i + 1] == 's' || + fmt[i + 1] == 'S') { /* just kernel pointers */ - if (final_args) + if (tmp_buf) cur_arg = raw_args[num_spec]; - goto fmt_next; + i++; + goto nocopy_fmt; + } + + if (fmt[i + 1] == 'B') { + if (tmp_buf) { + err = snprintf(tmp_buf, + (tmp_buf_end - tmp_buf), + "%pB", + (void *)(long)raw_args[num_spec]); + tmp_buf += (err + 1); + } + + i++; + num_spec++; + continue; } /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { err = -EINVAL; - goto cleanup; - } - - i += 2; - if (!final_args) - goto fmt_next; - - if (try_get_fmt_tmp_buf(&tmp_buf)) { - err = -EBUSY; goto out; } - copy_size = (fmt[i + 2] == '4') ? 4 : 16; - if (tmp_buf_len < copy_size) { + i += 2; + if (!tmp_buf) + goto nocopy_fmt; + + sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16; + if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { err = -ENOSPC; - goto cleanup; + goto out; } unsafe_ptr = (char *)(long)raw_args[num_spec]; - err = copy_from_kernel_nofault(tmp_buf, unsafe_ptr, - copy_size); + err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, + sizeof_cur_ip); if (err < 0) - memset(tmp_buf, 0, copy_size); - cur_arg = (u64)(long)tmp_buf; - tmp_buf += copy_size; - tmp_buf_len -= copy_size; + memset(cur_ip, 0, sizeof_cur_ip); - goto fmt_next; + /* hack: bstr_printf expects IP addresses to be + * pre-formatted as strings, ironically, the easiest way + * to do that is to call snprintf. 
+ */ + ip_spec[2] = fmt[i - 1]; + ip_spec[3] = fmt[i]; + err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, + ip_spec, &cur_ip); + + tmp_buf += err + 1; + num_spec++; + + continue; } else if (fmt[i] == 's') { - cur_mod = BPF_PRINTF_LONG; fmt_ptype = fmt[i]; fmt_str: if (fmt[i + 1] != 0 && !isspace(fmt[i + 1]) && !ispunct(fmt[i + 1])) { err = -EINVAL; - goto cleanup; - } - - if (!final_args) - goto fmt_next; - - if (try_get_fmt_tmp_buf(&tmp_buf)) { - err = -EBUSY; goto out; } - if (!tmp_buf_len) { + if (!tmp_buf) + goto nocopy_fmt; + + if (tmp_buf_end == tmp_buf) { err = -ENOSPC; - goto cleanup; + goto out; } unsafe_ptr = (char *)(long)raw_args[num_spec]; err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, - fmt_ptype, tmp_buf_len); + fmt_ptype, + tmp_buf_end - tmp_buf); if (err < 0) { tmp_buf[0] = '\0'; err = 1; } - cur_arg = (u64)(long)tmp_buf; tmp_buf += err; - tmp_buf_len -= err; + num_spec++; - goto fmt_next; + continue; } - cur_mod = BPF_PRINTF_INT; + sizeof_cur_arg = sizeof(int); if (fmt[i] == 'l') { - cur_mod = BPF_PRINTF_LONG; + sizeof_cur_arg = sizeof(long); i++; } if (fmt[i] == 'l') { - cur_mod = BPF_PRINTF_LONG_LONG; + sizeof_cur_arg = sizeof(long long); i++; } if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x' && fmt[i] != 'X') { err = -EINVAL; - goto cleanup; + goto out; } - if (final_args) + if (tmp_buf) cur_arg = raw_args[num_spec]; -fmt_next: - if (final_args) { - mod[num_spec] = cur_mod; - final_args[num_spec] = cur_arg; +nocopy_fmt: + if (tmp_buf) { + tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); + if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { + err = -ENOSPC; + goto out; + } + + if (sizeof_cur_arg == 8) { + *(u32 *)tmp_buf = *(u32 *)&cur_arg; + *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); + } else { + *(u32 *)tmp_buf = (u32)(long)cur_arg; + } + tmp_buf += sizeof_cur_arg; } num_spec++; } err = 0; -cleanup: - if (err) - bpf_printf_cleanup(); out: + if (err) + bpf_bprintf_cleanup(); return err; } @@ -930,9 +952,8 @@ out: BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, const void *, data, u32, data_len) { - enum bpf_printf_mod_type mod[MAX_SNPRINTF_VARARGS]; - u64 args[MAX_SNPRINTF_VARARGS]; int err, num_args; + u32 *bin_args; if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 || (data_len && !data)) @@ -942,22 +963,13 @@ BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we * can safely give an unbounded size. */ - err = bpf_printf_prepare(fmt, UINT_MAX, data, args, mod, num_args); + err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args); if (err < 0) return err; - /* Maximumly we can have MAX_SNPRINTF_VARARGS parameters, just give - * all of them to snprintf(). 
- */ - err = snprintf(str, str_size, fmt, BPF_CAST_FMT_ARG(0, args, mod), - BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod), - BPF_CAST_FMT_ARG(3, args, mod), BPF_CAST_FMT_ARG(4, args, mod), - BPF_CAST_FMT_ARG(5, args, mod), BPF_CAST_FMT_ARG(6, args, mod), - BPF_CAST_FMT_ARG(7, args, mod), BPF_CAST_FMT_ARG(8, args, mod), - BPF_CAST_FMT_ARG(9, args, mod), BPF_CAST_FMT_ARG(10, args, mod), - BPF_CAST_FMT_ARG(11, args, mod)); + err = bstr_printf(str, str_size, fmt, bin_args); - bpf_printf_cleanup(); + bpf_bprintf_cleanup(); return err + 1; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9145f88b2a0a..8fd552c16763 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5946,7 +5946,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env, /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we * can focus on validating the format specifiers. */ - err = bpf_printf_prepare(fmt, UINT_MAX, NULL, NULL, NULL, num_args); + err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); if (err < 0) verbose(env, "Invalid format string\n"); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0e67d12a8f40..d2d7cf6cfe83 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -381,27 +381,23 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, u64, arg2, u64, arg3) { u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; - enum bpf_printf_mod_type mod[MAX_TRACE_PRINTK_VARARGS]; + u32 *bin_args; static char buf[BPF_TRACE_PRINTK_SIZE]; unsigned long flags; int ret; - ret = bpf_printf_prepare(fmt, fmt_size, args, args, mod, - MAX_TRACE_PRINTK_VARARGS); + ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, + MAX_TRACE_PRINTK_VARARGS); if (ret < 0) return ret; raw_spin_lock_irqsave(&trace_printk_lock, flags); - ret = snprintf(buf, sizeof(buf), fmt, BPF_CAST_FMT_ARG(0, args, mod), - BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod)); - /* snprintf() will not append null for zero-length strings */ - if (ret == 0) - buf[0] = '\0'; + ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); trace_bpf_trace_printk(buf); raw_spin_unlock_irqrestore(&trace_printk_lock, flags); - bpf_printf_cleanup(); + bpf_bprintf_cleanup(); return ret; } @@ -435,31 +431,21 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, const void *, data, u32, data_len) { - enum bpf_printf_mod_type mod[MAX_SEQ_PRINTF_VARARGS]; - u64 args[MAX_SEQ_PRINTF_VARARGS]; int err, num_args; + u32 *bin_args; if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 || (data_len && !data)) return -EINVAL; num_args = data_len / 8; - err = bpf_printf_prepare(fmt, fmt_size, data, args, mod, num_args); + err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); if (err < 0) return err; - /* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give - * all of them to seq_printf(). - */ - seq_printf(m, fmt, BPF_CAST_FMT_ARG(0, args, mod), - BPF_CAST_FMT_ARG(1, args, mod), BPF_CAST_FMT_ARG(2, args, mod), - BPF_CAST_FMT_ARG(3, args, mod), BPF_CAST_FMT_ARG(4, args, mod), - BPF_CAST_FMT_ARG(5, args, mod), BPF_CAST_FMT_ARG(6, args, mod), - BPF_CAST_FMT_ARG(7, args, mod), BPF_CAST_FMT_ARG(8, args, mod), - BPF_CAST_FMT_ARG(9, args, mod), BPF_CAST_FMT_ARG(10, args, mod), - BPF_CAST_FMT_ARG(11, args, mod)); + seq_bprintf(m, fmt, bin_args); - bpf_printf_cleanup(); + bpf_bprintf_cleanup(); return seq_has_overflowed(m) ? 
-EOVERFLOW : 0; } From f008d732ab181fd00d95c2e8a6e479d2f7c634b3 Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Sat, 24 Apr 2021 18:45:09 -0300 Subject: [PATCH 18/19] bpf: Add batched ops support for percpu array Uses the already in-place infrastructure provided by the 'generic_map_*_batch' functions. No tweak was needed as it transparently handles the percpu variant. As arrays don't have delete operations, let it return a error to user space (default behaviour). Suggested-by: Jamal Hadi Salim Signed-off-by: Pedro Tammela Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20210424214510.806627-2-pctammela@mojatatu.com --- kernel/bpf/arraymap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 463d25e1e67e..3c4105603f9d 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -698,6 +698,8 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_delete_elem = array_map_delete_elem, .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, + .map_lookup_batch = generic_map_lookup_batch, + .map_update_batch = generic_map_update_batch, .map_set_for_each_callback_args = map_set_for_each_callback_args, .map_for_each_callback = bpf_for_each_array_elem, .map_btf_name = "bpf_array", From 3733bfbbdd28f7a65340d0058d15d15190a4944a Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Sat, 24 Apr 2021 18:45:10 -0300 Subject: [PATCH 19/19] bpf, selftests: Update array map tests for per-cpu batched ops Follows the same logic as the hashtable tests. Signed-off-by: Pedro Tammela Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20210424214510.806627-3-pctammela@mojatatu.com --- .../bpf/map_tests/array_map_batch_ops.c | 104 +++++++++++++----- 1 file changed, 75 insertions(+), 29 deletions(-) diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c index e42ea1195d18..f4d870da7684 100644 --- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c @@ -9,10 +9,13 @@ #include +static int nr_cpus; + static void map_batch_update(int map_fd, __u32 max_entries, int *keys, - int *values) + __s64 *values, bool is_pcpu) { - int i, err; + int i, j, err; + int cpu_offset = 0; DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = 0, .flags = 0, @@ -20,22 +23,41 @@ static void map_batch_update(int map_fd, __u32 max_entries, int *keys, for (i = 0; i < max_entries; i++) { keys[i] = i; - values[i] = i + 1; + if (is_pcpu) { + cpu_offset = i * nr_cpus; + for (j = 0; j < nr_cpus; j++) + (values + cpu_offset)[j] = i + 1 + j; + } else { + values[i] = i + 1; + } } err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts); CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno)); } -static void map_batch_verify(int *visited, __u32 max_entries, - int *keys, int *values) +static void map_batch_verify(int *visited, __u32 max_entries, int *keys, + __s64 *values, bool is_pcpu) { - int i; + int i, j; + int cpu_offset = 0; memset(visited, 0, max_entries * sizeof(*visited)); for (i = 0; i < max_entries; i++) { - CHECK(keys[i] + 1 != values[i], "key/value checking", - "error: i %d key %d value %d\n", i, keys[i], values[i]); + if (is_pcpu) { + cpu_offset = i * nr_cpus; + for (j = 0; j < nr_cpus; j++) { + __s64 value = (values + cpu_offset)[j]; + CHECK(keys[i] + j + 1 != value, + "key/value checking", + "error: i %d j %d key %d value %lld\n", 
i, + j, keys[i], value); + } + } else { + CHECK(keys[i] + 1 != values[i], "key/value checking", + "error: i %d key %d value %lld\n", i, keys[i], + values[i]); + } visited[i] = 1; } for (i = 0; i < max_entries; i++) { @@ -44,19 +66,21 @@ static void map_batch_verify(int *visited, __u32 max_entries, } } -void test_array_map_batch_ops(void) +static void __test_map_lookup_and_update_batch(bool is_pcpu) { struct bpf_create_map_attr xattr = { .name = "array_map", - .map_type = BPF_MAP_TYPE_ARRAY, + .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : + BPF_MAP_TYPE_ARRAY, .key_size = sizeof(int), - .value_size = sizeof(int), + .value_size = sizeof(__s64), }; - int map_fd, *keys, *values, *visited; + int map_fd, *keys, *visited; __u32 count, total, total_success; const __u32 max_entries = 10; __u64 batch = 0; - int err, step; + int err, step, value_size; + void *values; DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = 0, .flags = 0, @@ -67,22 +91,23 @@ void test_array_map_batch_ops(void) CHECK(map_fd == -1, "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); - keys = malloc(max_entries * sizeof(int)); - values = malloc(max_entries * sizeof(int)); - visited = malloc(max_entries * sizeof(int)); + value_size = sizeof(__s64); + if (is_pcpu) + value_size *= nr_cpus; + + keys = calloc(max_entries, sizeof(*keys)); + values = calloc(max_entries, value_size); + visited = calloc(max_entries, sizeof(*visited)); CHECK(!keys || !values || !visited, "malloc()", "error:%s\n", strerror(errno)); - /* populate elements to the map */ - map_batch_update(map_fd, max_entries, keys, values); - /* test 1: lookup in a loop with various steps. */ total_success = 0; for (step = 1; step < max_entries; step++) { - map_batch_update(map_fd, max_entries, keys, values); - map_batch_verify(visited, max_entries, keys, values); + map_batch_update(map_fd, max_entries, keys, values, is_pcpu); + map_batch_verify(visited, max_entries, keys, values, is_pcpu); memset(keys, 0, max_entries * sizeof(*keys)); - memset(values, 0, max_entries * sizeof(*values)); + memset(values, 0, max_entries * value_size); batch = 0; total = 0; /* iteratively lookup/delete elements with 'step' @@ -91,10 +116,10 @@ void test_array_map_batch_ops(void) count = step; while (true) { err = bpf_map_lookup_batch(map_fd, - total ? &batch : NULL, &batch, - keys + total, - values + total, - &count, &opts); + total ? 
&batch : NULL, + &batch, keys + total, + values + total * value_size, + &count, &opts); CHECK((err && errno != ENOENT), "lookup with steps", "error: %s\n", strerror(errno)); @@ -108,7 +133,7 @@ void test_array_map_batch_ops(void) CHECK(total != max_entries, "lookup with steps", "total = %u, max_entries = %u\n", total, max_entries); - map_batch_verify(visited, max_entries, keys, values); + map_batch_verify(visited, max_entries, keys, values, is_pcpu); total_success++; } @@ -116,9 +141,30 @@ void test_array_map_batch_ops(void) CHECK(total_success == 0, "check total_success", "unexpected failure\n"); - printf("%s:PASS\n", __func__); - free(keys); free(values); free(visited); } + +static void array_map_batch_ops(void) +{ + __test_map_lookup_and_update_batch(false); + printf("test_%s:PASS\n", __func__); +} + +static void array_percpu_map_batch_ops(void) +{ + __test_map_lookup_and_update_batch(true); + printf("test_%s:PASS\n", __func__); +} + +void test_array_map_batch_ops(void) +{ + nr_cpus = libbpf_num_possible_cpus(); + + CHECK(nr_cpus < 0, "nr_cpus checking", + "error: get possible cpus failed"); + + array_map_batch_ops(); + array_percpu_map_batch_ops(); +}
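
For context, the batched interface that these last two patches enable for BPF_MAP_TYPE_PERCPU_ARRAY can be driven from plain user space with the same libbpf calls the selftest relies on. The sketch below is illustrative only and is not part of the series; the map size, the value layout (one __s64 slot per possible CPU for each key), and the minimal error handling are assumptions, and it uses the libbpf map-creation helper available at the time of this series.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	const __u32 max_entries = 10;		/* illustrative size */
	int nr_cpus = libbpf_num_possible_cpus();
	__u32 count = max_entries;
	__u64 batch = 0;
	int keys[10], map_fd, i, j;
	__s64 *values;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);

	if (nr_cpus < 0)
		return 1;

	/* Per-CPU arrays expose one value slot per possible CPU for each key. */
	map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(int),
				sizeof(__s64), max_entries, 0);
	values = calloc(max_entries * nr_cpus, sizeof(__s64));
	if (map_fd < 0 || !values)
		return 1;

	for (i = 0; i < (int)max_entries; i++) {
		keys[i] = i;
		for (j = 0; j < nr_cpus; j++)
			values[i * nr_cpus + j] = i + 1 + j;
	}

	/* Write all entries in one syscall, then read them all back. */
	if (bpf_map_update_batch(map_fd, keys, values, &count, &opts))
		return 1;

	count = max_entries;
	memset(values, 0, max_entries * nr_cpus * sizeof(__s64));
	if (bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values,
				 &count, &opts) && errno != ENOENT)
		return 1;

	printf("batched lookup returned %u entries\n", count);
	free(values);
	return 0;
}

Note that batched delete stays unsupported for array maps, since arrays have no delete operation; bpf_map_delete_batch() keeps returning an error to user space, matching the behaviour described in the commit message above.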