net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

and

struct bpf_prog {
	u32			jited:1,
				len:31;
	struct sock_fprog_kern	*orig_prog;
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
		struct work_struct	work;
	};
};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases

split SK_RUN_FILTER macro into:
    SK_RUN_FILTER to be used with 'struct sk_filter *' and
    BPF_PROG_RUN to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 8fb575ca39
Commit: 7ae457c1e5
21 changed files with 183 additions and 169 deletions
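For reference, a minimal sketch of the renamed 'unattached' API described in the commit message. Only the bpf_prog_create()/BPF_PROG_RUN()/bpf_prog_destroy() calls come from this commit; the demo_unattached_bpf() wrapper and its trivial accept-all classic program are hypothetical, modeled on the lib/test_bpf.c hunks below.

/*
 * Hypothetical sketch of the 'unattached' BPF API after this commit.
 * Only bpf_prog_create(), BPF_PROG_RUN() and bpf_prog_destroy() are
 * from the commit; everything else is illustrative.
 */
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

static unsigned int demo_unattached_bpf(const struct sk_buff *skb)
{
	/* trivial classic program: accept up to 0xffff bytes of the packet */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;
	unsigned int res;

	/* was sk_unattached_filter_create() */
	if (bpf_prog_create(&prog, &fprog))
		return 0;

	/* was SK_RUN_FILTER() for unattached users */
	res = BPF_PROG_RUN(prog, skb);

	/* was sk_unattached_filter_destroy() */
	bpf_prog_destroy(prog);

	return res;
}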
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
 	return len + 1;
 }
 
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 	struct sock_fprog_kern fprog;
 	unsigned int flen = probe_filter_length(tests[which].u.insns);
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		fprog.filter = tests[which].u.insns;
 		fprog.len = flen;
 
-		*err = sk_unattached_filter_create(&fp, &fprog);
+		*err = bpf_prog_create(&fp, &fprog);
 		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
 			if (*err == -EINVAL) {
 				pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		memcpy(fp->insnsi, tests[which].u.insns_int,
 		       fp->len * sizeof(struct bpf_insn));
 
-		sk_filter_select_runtime(fp);
+		bpf_prog_select_runtime(fp);
 		break;
 	}
 
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
 	return fp;
 }
 
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
 	switch (test_type) {
 	case CLASSIC:
-		sk_unattached_filter_destroy(fp);
+		bpf_prog_destroy(fp);
 		break;
 	case INTERNAL:
-		sk_filter_free(fp);
+		bpf_prog_free(fp);
 		break;
 	}
 }
 
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
 		     int runs, u64 *duration)
 {
 	u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	start = ktime_to_us(ktime_get());
 
 	for (i = 0; i < runs; i++)
-		ret = SK_RUN_FILTER(fp, data);
+		ret = BPF_PROG_RUN(fp, data);
 
 	finish = ktime_to_us(ktime_get());
 
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	return ret;
 }
 
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
 	int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
 	int i, err_cnt = 0, pass_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		struct sk_filter *fp;
+		struct bpf_prog *fp;
 		int err;
 
 		pr_info("#%d %s ", i, tests[i].descr);