bpf: Use bpf_prog_run_pin_on_cpu() at simple call sites.
All of these cases are strictly of the form:

	preempt_disable();
	BPF_PROG_RUN(...);
	preempt_enable();

Replace this with bpf_prog_run_pin_on_cpu(), which wraps BPF_PROG_RUN() with:

	migrate_disable();
	BPF_PROG_RUN(...);
	migrate_enable();

On non-RT enabled kernels this maps to preempt_disable/enable(), and on RT enabled kernels it solely prevents migration, which is sufficient as there is no requirement to prevent reentrancy into any BPF program from a preempting task. The only requirement is that the program stays on the same CPU. Therefore, this is a trivially correct transformation.

The seccomp loop does not need protection over the whole loop; it only needs protection per BPF filter program.

[ tglx: Converted to bpf_prog_run_pin_on_cpu() ]

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.691493094@linutronix.de
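For context, a minimal sketch of what the new helper does, following the shape described in the message above (illustrative only; the in-tree definition was added to include/linux/filter.h by the parent commit):

	static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
						  const void *ctx)
	{
		u32 ret;

		/*
		 * On non-RT kernels migrate_disable() maps to
		 * preempt_disable(), so this matches the old open-coded
		 * pattern. On RT kernels it only prevents migration: the
		 * program stays on the current CPU but remains preemptible.
		 */
		migrate_disable();
		ret = BPF_PROG_RUN(prog, ctx);
		migrate_enable();
		return ret;
	}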
This commit is contained in:
parent 37e1d92022
commit 3d9f773cf2

5 changed files with 6 additions and 18 deletions
include/linux/filter.h
@@ -717,9 +717,7 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 	if (unlikely(prog->cb_access))
 		memset(cb_data, 0, BPF_SKB_CB_LEN);
 
-	preempt_disable();
-	res = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
+	res = bpf_prog_run_pin_on_cpu(prog, skb);
 	return res;
 }
 
kernel/seccomp.c
@@ -268,16 +268,14 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	 * All filters in the list are evaluated and the lowest BPF return
 	 * value always takes priority (ignoring the DATA).
 	 */
-	preempt_disable();
 	for (; f; f = f->prev) {
-		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
+		u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);
 
 		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
 			ret = cur_ret;
 			*match = f;
 		}
 	}
-	preempt_enable();
 	return ret;
 }
 #endif /* CONFIG_SECCOMP_FILTER */
net/core/flow_dissector.c
@@ -920,9 +920,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
 		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
 	flow_keys->flags = flags;
 
-	preempt_disable();
-	result = BPF_PROG_RUN(prog, ctx);
-	preempt_enable();
+	result = bpf_prog_run_pin_on_cpu(prog, ctx);
 
 	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
 	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
net/core/skmsg.c
@@ -628,7 +628,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 	struct bpf_prog *prog;
 	int ret;
 
-	preempt_disable();
 	rcu_read_lock();
 	prog = READ_ONCE(psock->progs.msg_parser);
 	if (unlikely(!prog)) {
@@ -638,7 +637,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 
 	sk_msg_compute_data_pointers(msg);
 	msg->sk = sk;
-	ret = BPF_PROG_RUN(prog, msg);
+	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 	ret = sk_psock_map_verd(ret, msg->sk_redir);
 	psock->apply_bytes = msg->apply_bytes;
 	if (ret == __SK_REDIRECT) {
@@ -653,7 +652,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 	}
 out:
 	rcu_read_unlock();
-	preempt_enable();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
@@ -665,9 +663,7 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
 
 	skb->sk = psock->sk;
 	bpf_compute_data_end_sk_skb(skb);
-	preempt_disable();
-	ret = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
+	ret = bpf_prog_run_pin_on_cpu(prog, skb);
 	/* strparser clones the skb before handing it to a upper layer,
 	 * meaning skb_orphan has been called. We NULL sk on the way out
 	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
net/kcm/kcmsock.c
@@ -380,9 +380,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 	struct bpf_prog *prog = psock->bpf_prog;
 	int res;
 
-	preempt_disable();
-	res = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
+	res = bpf_prog_run_pin_on_cpu(prog, skb);
 	return res;
 }
 