mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-23 07:12:09 +00:00
bpf: permit narrower loads from bpf program context fields
Currently, the verifier will reject a program if it contains a narrower load from the bpf context structure. For example, __u8 h = __sk_buff->hash, __u16 p = __sk_buff->protocol, or __u32 sample_period = bpf_perf_event_data->sample_period are narrower loads of a 4-byte or 8-byte field. This patch solves the issue by: . Introducing a new parameter ctx_field_size to carry the field size of a narrower load from the prog-type-specific *__is_valid_access validator back to the verifier. . A non-zero ctx_field_size for a memory access indicates that (1) the underlying prog-type-specific convert_ctx_accesses supports non-whole-field access, and (2) the current insn is a narrower or whole-field access. . In the verifier, for loads where the load memory size is less than ctx_field_size, the verifier transforms the load into a full field load followed by proper masking. . Currently, __sk_buff and bpf_perf_event_data->sample_period support narrower loads. . Narrower stores are still not allowed, as typical ctx stores are just normal stores. Because of this change, some tests in the verifier will fail; these tests are removed. As a bonus, rename some out-of-bound __sk_buff->cb accesses to proper field names and remove two redundant "skb cb oob" tests. Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Yonghong Song <yhs@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
a88e2676a6
commit
31fd85816d
6 changed files with 124 additions and 119 deletions
|
@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
|
|||
|
||||
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
|
||||
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
|
||||
enum bpf_reg_type *reg_type)
|
||||
enum bpf_reg_type *reg_type, int *ctx_field_size)
|
||||
{
|
||||
if (off < 0 || off >= sizeof(struct pt_regs))
|
||||
return false;
|
||||
|
@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
|
|||
}
|
||||
|
||||
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
|
||||
enum bpf_reg_type *reg_type)
|
||||
enum bpf_reg_type *reg_type, int *ctx_field_size)
|
||||
{
|
||||
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
|
||||
return false;
|
||||
|
@ -581,17 +581,26 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
|
|||
};
|
||||
|
||||
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
|
||||
enum bpf_reg_type *reg_type)
|
||||
enum bpf_reg_type *reg_type, int *ctx_field_size)
|
||||
{
|
||||
int sample_period_off;
|
||||
|
||||
if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
|
||||
return false;
|
||||
if (type != BPF_READ)
|
||||
return false;
|
||||
if (off % size != 0)
|
||||
return false;
|
||||
if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
|
||||
if (size != sizeof(u64))
|
||||
return false;
|
||||
|
||||
/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
|
||||
sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
|
||||
if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
|
||||
*ctx_field_size = 8;
|
||||
#ifdef __LITTLE_ENDIAN
|
||||
return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
|
||||
#else
|
||||
return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
|
||||
#endif
|
||||
} else {
|
||||
if (size != sizeof(long))
|
||||
return false;
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue