Merge branch 'introduce bpf_find_vma'
Song Liu says:

====================

Changes v4 => v5:
1. Clean up and style change in 2/2. (Andrii)

Changes v3 => v4:
1. Move mmap_unlock_work to task_iter.c to fix build for .config without
   CONFIG_PERF_EVENTS. (kernel test robot <lkp@intel.com>)

Changes v2 => v3:
1. Avoid using x86 only function in selftests. (Yonghong)
2. Add struct file and struct vm_area_struct to btf_task_struct_ids, and
   use it in bpf_find_vma and stackmap.c. (Yonghong)
3. Fix inaccurate comments. (Yonghong)

Changes v1 => v2:
1. Share irq_work with stackmap.c. (Daniel)
2. Add tests for illegal writes to task/vma from the callback function. (Daniel)
3. Other small fixes.

Add helper bpf_find_vma. This can be used in some profiling use cases. It
might also be useful for LSM.

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 0cc78dcca3

13 changed files with 466 additions and 81 deletions
include/linux/bpf.h
@@ -2157,6 +2157,7 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
 extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
 
 const struct bpf_func_proto *tracing_prog_func_proto(
 	enum bpf_func_id func_id, const struct bpf_prog *prog);
include/uapi/linux/bpf.h
@@ -4938,6 +4938,25 @@ union bpf_attr {
  *		**-ENOENT** if symbol is not found.
  *
  *		**-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ *	Description
+ *		Find vma of *task* that contains *addr*, call *callback_fn*
+ *		function with *task*, *vma*, and *callback_ctx*.
+ *		The *callback_fn* should be a static function and
+ *		the *callback_ctx* should be a pointer to the stack.
+ *		The *flags* is used to control certain aspects of the helper.
+ *		Currently, the *flags* must be 0.
+ *
+ *		The expected callback signature is
+ *
+ *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ *	Return
+ *		0 on success.
+ *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ *		**-EBUSY** if failed to try lock mmap_lock.
+ *		**-EINVAL** for invalid **flags**.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\

@@ -5120,6 +5139,7 @@ union bpf_attr {
 	FN(trace_vprintk),		\
 	FN(skc_to_unix_sock),		\
 	FN(kallsyms_lookup_name),	\
+	FN(find_vma),			\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
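For orientation, here is a minimal sketch of how a BPF program is expected to call the new helper, mirroring the callback signature documented above. This is an illustration only, not part of the commit; the section name, the globals, and the open-coded VM_EXEC value are hypothetical. The real selftest added later in this commit (tools/testing/selftests/bpf/progs/find_vma.c) exercises the same pattern in full.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of this commit. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

__u64 probe_addr = 0;     /* address to look up, set from user space */
__u32 vma_executable = 0; /* result reported back to user space */

/* Called by bpf_find_vma() for the vma containing probe_addr,
 * while mmap_lock of the task's mm is held for read.
 */
static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
		      void *ctx)
{
	if (vma->vm_flags & 0x4 /* VM_EXEC */)
		vma_executable = 1;
	return 0;
}

SEC("raw_tp/sys_enter")
int lookup_vma(void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();

	/* flags must be 0; returns -ENOENT when no vma contains probe_addr */
	bpf_find_vma(task, probe_addr, check_vma, NULL, 0);
	return 0;
}

As the kernel-side implementation below shows, the helper in this commit invokes the callback at most once, for the single vma that contains *addr*, and ignores the callback's return value.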
kernel/bpf/btf.c
@@ -6342,7 +6342,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
 	.arg4_type	= ARG_ANYTHING,
 };
 
-BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
+BTF_ID_LIST_GLOBAL(btf_task_struct_ids)
+BTF_ID(struct, task_struct)
+BTF_ID(struct, file)
+BTF_ID(struct, vm_area_struct)
 
 /* BTF ID set registration API for modules */
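For orientation while reading the later hunks, the index layout of the expanded btf_task_struct_ids array can be summarized as follows (a reference note written as a C comment; it is not part of the diff itself):

/* After this change btf_task_struct_ids[] carries three BTF IDs:
 *   [0] struct task_struct    - bpf_find_vma's arg1 and the task iterators
 *   [1] struct file           - the task_file iterator (was btf_task_file_ids[0])
 *   [2] struct vm_area_struct - the task_vma iterator and the second argument
 *                               of the bpf_find_vma callback (was btf_task_file_ids[1])
 */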
kernel/bpf/mmap_unlock_work.h (new file, 65 additions)
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Facebook
+ */
+
+#ifndef __MMAP_UNLOCK_WORK_H__
+#define __MMAP_UNLOCK_WORK_H__
+#include <linux/irq_work.h>
+
+/* irq_work to run mmap_read_unlock() in irq_work */
+struct mmap_unlock_irq_work {
+	struct irq_work irq_work;
+	struct mm_struct *mm;
+};
+
+DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
+
+/*
+ * We cannot do mmap_read_unlock() when the irq is disabled, because of
+ * risk to deadlock with rq_lock. To look up vma when the irqs are
+ * disabled, we need to run mmap_read_unlock() in irq_work. We use a
+ * percpu variable to do the irq_work. If the irq_work is already used
+ * by another lookup, we fall over.
+ */
+static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
+{
+	struct mmap_unlock_irq_work *work = NULL;
+	bool irq_work_busy = false;
+
+	if (irqs_disabled()) {
+		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			work = this_cpu_ptr(&mmap_unlock_work);
+			if (irq_work_is_busy(&work->irq_work)) {
+				/* cannot queue more up_read, fallback */
+				irq_work_busy = true;
+			}
+		} else {
+			/*
+			 * PREEMPT_RT does not allow to trylock mmap sem in
+			 * interrupt disabled context. Force the fallback code.
+			 */
+			irq_work_busy = true;
+		}
+	}
+
+	*work_ptr = work;
+	return irq_work_busy;
+}
+
+static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
+{
+	if (!work) {
+		mmap_read_unlock(mm);
+	} else {
+		work->mm = mm;
+
+		/* The lock will be released once we're out of interrupt
+		 * context. Tell lockdep that we've released it now so
+		 * it doesn't complain that we forgot to release it.
+		 */
+		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
+		irq_work_queue(&work->irq_work);
+	}
+}
+
+#endif /* __MMAP_UNLOCK_WORK_H__ */
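To make the calling convention of this header easier to follow, here is a condensed sketch of the pattern its two users in this commit (stackmap.c and task_iter.c below) follow. The wrapper function name and the surrounding error handling are illustrative only, not part of the diff:

/* Illustrative sketch of the mmap_unlock_work.h usage pattern. */
static int lookup_under_mmap_lock(struct mm_struct *mm, unsigned long addr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
	struct vm_area_struct *vma;

	/* With IRQs disabled, 'work' points at the per-CPU irq_work; if it
	 * is already queued (or on PREEMPT_RT), skip the lookup entirely.
	 */
	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;

	vma = find_vma(mm, addr);	/* ... use vma while the lock is held ... */

	/* Unlocks immediately in normal context, or defers the unlock to
	 * irq_work when running with IRQs disabled (e.g. in NMI).
	 */
	bpf_mmap_unlock_mm(work, mm);
	return vma ? 0 : -ENOENT;
}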
kernel/bpf/stackmap.c
@@ -7,10 +7,10 @@
 #include <linux/kernel.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
-#include <linux/irq_work.h>
 #include <linux/btf_ids.h>
 #include <linux/buildid.h>
 #include "percpu_freelist.h"
+#include "mmap_unlock_work.h"
 
 #define STACK_CREATE_FLAG_MASK					\
 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\

@@ -31,25 +31,6 @@ struct bpf_stack_map {
 	struct stack_map_bucket *buckets[];
 };
 
-/* irq_work to run up_read() for build_id lookup in nmi context */
-struct stack_map_irq_work {
-	struct irq_work irq_work;
-	struct mm_struct *mm;
-};
-
-static void do_up_read(struct irq_work *entry)
-{
-	struct stack_map_irq_work *work;
-
-	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
-		return;
-
-	work = container_of(entry, struct stack_map_irq_work, irq_work);
-	mmap_read_unlock_non_owner(work->mm);
-}
-
-static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
-
 static inline bool stack_map_use_build_id(struct bpf_map *map)
 {
 	return (map->map_flags & BPF_F_STACK_BUILD_ID);

@@ -149,35 +130,13 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 					  u64 *ips, u32 trace_nr, bool user)
 {
 	int i;
+	struct mmap_unlock_irq_work *work = NULL;
+	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
 	struct vm_area_struct *vma;
-	bool irq_work_busy = false;
-	struct stack_map_irq_work *work = NULL;
-
-	if (irqs_disabled()) {
-		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
-			work = this_cpu_ptr(&up_read_work);
-			if (irq_work_is_busy(&work->irq_work)) {
-				/* cannot queue more up_read, fallback */
-				irq_work_busy = true;
-			}
-		} else {
-			/*
-			 * PREEMPT_RT does not allow to trylock mmap sem in
-			 * interrupt disabled context. Force the fallback code.
-			 */
-			irq_work_busy = true;
-		}
-	}
 
-	/*
-	 * We cannot do up_read() when the irq is disabled, because of
-	 * risk to deadlock with rq_lock. To do build_id lookup when the
-	 * irqs are disabled, we need to run up_read() in irq_work. We use
-	 * a percpu variable to do the irq_work. If the irq_work is
-	 * already used by another lookup, we fall back to report ips.
-	 *
-	 * Same fallback is used for kernel stack (!user) on a stackmap
-	 * with build_id.
+	/* If the irq_work is in use, fall back to report ips. Same
+	 * fallback is used for kernel stack (!user) on a stackmap with
+	 * build_id.
 	 */
 	if (!user || !current || !current->mm || irq_work_busy ||
 	    !mmap_read_trylock(current->mm)) {

@@ -203,19 +162,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 			- vma->vm_start;
 		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
 	}
-
-	if (!work) {
-		mmap_read_unlock(current->mm);
-	} else {
-		work->mm = current->mm;
-
-		/* The lock will be released once we're out of interrupt
-		 * context. Tell lockdep that we've released it now so
-		 * it doesn't complain that we forgot to release it.
-		 */
-		rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
-		irq_work_queue(&work->irq_work);
-	}
+	bpf_mmap_unlock_mm(work, current->mm);
 }
 
 static struct perf_callchain_entry *

@@ -719,16 +666,3 @@ const struct bpf_map_ops stack_trace_map_ops = {
 	.map_btf_name = "bpf_stack_map",
 	.map_btf_id = &stack_trace_map_btf_id,
 };
-
-static int __init stack_map_init(void)
-{
-	int cpu;
-	struct stack_map_irq_work *work;
-
-	for_each_possible_cpu(cpu) {
-		work = per_cpu_ptr(&up_read_work, cpu);
-		init_irq_work(&work->irq_work, do_up_read);
-	}
-	return 0;
-}
-subsys_initcall(stack_map_init);
kernel/bpf/task_iter.c
@@ -8,6 +8,7 @@
 #include <linux/fdtable.h>
 #include <linux/filter.h>
 #include <linux/btf_ids.h>
+#include "mmap_unlock_work.h"
 
 struct bpf_iter_seq_task_common {
 	struct pid_namespace *ns;

@@ -524,10 +525,6 @@ static const struct seq_operations task_vma_seq_ops = {
 	.show	= task_vma_seq_show,
 };
 
-BTF_ID_LIST(btf_task_file_ids)
-BTF_ID(struct, file)
-BTF_ID(struct, vm_area_struct)
-
 static const struct bpf_iter_seq_info task_seq_info = {
 	.seq_ops		= &task_seq_ops,
 	.init_seq_private	= init_seq_pidns,

@@ -586,9 +583,74 @@ static struct bpf_iter_reg task_vma_reg_info = {
 	.seq_info		= &task_vma_seq_info,
 };
 
+BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
+	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
+{
+	struct mmap_unlock_irq_work *work = NULL;
+	struct vm_area_struct *vma;
+	bool irq_work_busy = false;
+	struct mm_struct *mm;
+	int ret = -ENOENT;
+
+	if (flags)
+		return -EINVAL;
+
+	if (!task)
+		return -ENOENT;
+
+	mm = task->mm;
+	if (!mm)
+		return -ENOENT;
+
+	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
+
+	if (irq_work_busy || !mmap_read_trylock(mm))
+		return -EBUSY;
+
+	vma = find_vma(mm, start);
+
+	if (vma && vma->vm_start <= start && vma->vm_end > start) {
+		callback_fn((u64)(long)task, (u64)(long)vma,
+			    (u64)(long)callback_ctx, 0, 0);
+		ret = 0;
+	}
+	bpf_mmap_unlock_mm(work, mm);
+	return ret;
+}
+
+const struct bpf_func_proto bpf_find_vma_proto = {
+	.func		= bpf_find_vma,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id	= &btf_task_struct_ids[0],
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_PTR_TO_FUNC,
+	.arg4_type	= ARG_PTR_TO_STACK_OR_NULL,
+	.arg5_type	= ARG_ANYTHING,
+};
+
+DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
+
+static void do_mmap_read_unlock(struct irq_work *entry)
+{
+	struct mmap_unlock_irq_work *work;
+
+	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
+		return;
+
+	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
+	mmap_read_unlock_non_owner(work->mm);
+}
+
 static int __init task_iter_init(void)
 {
-	int ret;
+	struct mmap_unlock_irq_work *work;
+	int ret, cpu;
+
+	for_each_possible_cpu(cpu) {
+		work = per_cpu_ptr(&mmap_unlock_work, cpu);
+		init_irq_work(&work->irq_work, do_mmap_read_unlock);
+	}
 
 	task_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
 	ret = bpf_iter_reg_target(&task_reg_info);

@@ -596,13 +658,13 @@ static int __init task_iter_init(void)
 		return ret;
 
 	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
-	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[0];
+	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[1];
 	ret = bpf_iter_reg_target(&task_file_reg_info);
 	if (ret)
 		return ret;
 
 	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
-	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
+	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[2];
 	return bpf_iter_reg_target(&task_vma_reg_info);
 }
 late_initcall(task_iter_init);
kernel/bpf/verifier.c
@@ -6132,6 +6132,33 @@ static int set_timer_callback_state(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static int set_find_vma_callback_state(struct bpf_verifier_env *env,
+					struct bpf_func_state *caller,
+					struct bpf_func_state *callee,
+					int insn_idx)
+{
+	/* bpf_find_vma(struct task_struct *task, u64 addr,
+	 *               void *callback_fn, void *callback_ctx, u64 flags)
+	 * (callback_fn)(struct task_struct *task,
+	 *               struct vm_area_struct *vma, void *callback_ctx);
+	 */
+	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+	callee->regs[BPF_REG_2].btf = btf_vmlinux;
+	callee->regs[BPF_REG_2].btf_id = btf_task_struct_ids[2];
+
+	/* pointer to stack or null */
+	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
+
+	/* unused */
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	callee->in_callback_fn = true;
+	return 0;
+}
+
 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 {
 	struct bpf_verifier_state *state = env->cur_state;

@@ -6489,6 +6516,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 			return -EINVAL;
 	}
 
+	if (func_id == BPF_FUNC_find_vma) {
+		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+					set_find_vma_callback_state);
+		if (err < 0)
+			return -EINVAL;
+	}
+
 	if (func_id == BPF_FUNC_snprintf) {
 		err = check_bpf_snprintf_call(env, regs);
 		if (err < 0)
kernel/trace/bpf_trace.c
@@ -1208,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_func_ip_proto_tracing;
 	case BPF_FUNC_get_branch_snapshot:
 		return &bpf_get_branch_snapshot_proto;
+	case BPF_FUNC_find_vma:
+		return &bpf_find_vma_proto;
 	case BPF_FUNC_trace_vprintk:
 		return bpf_get_trace_vprintk_proto();
 	default:
tools/include/uapi/linux/bpf.h
@@ -4938,6 +4938,25 @@ union bpf_attr {
  *		**-ENOENT** if symbol is not found.
  *
  *		**-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ *	Description
+ *		Find vma of *task* that contains *addr*, call *callback_fn*
+ *		function with *task*, *vma*, and *callback_ctx*.
+ *		The *callback_fn* should be a static function and
+ *		the *callback_ctx* should be a pointer to the stack.
+ *		The *flags* is used to control certain aspects of the helper.
+ *		Currently, the *flags* must be 0.
+ *
+ *		The expected callback signature is
+ *
+ *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ *	Return
+ *		0 on success.
+ *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ *		**-EBUSY** if failed to try lock mmap_lock.
+ *		**-EINVAL** for invalid **flags**.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\

@@ -5120,6 +5139,7 @@ union bpf_attr {
 	FN(trace_vprintk),		\
 	FN(skc_to_unix_sock),		\
 	FN(kallsyms_lookup_name),	\
+	FN(find_vma),			\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
tools/testing/selftests/bpf/prog_tests/find_vma.c (new file, 117 additions)
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "find_vma.skel.h"
+#include "find_vma_fail1.skel.h"
+#include "find_vma_fail2.skel.h"
+
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret)
+{
+	ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+	ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+	ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+	ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+
+	skel->bss->found_vm_exec = 0;
+	skel->data->find_addr_ret = -1;
+	skel->data->find_zero_ret = -1;
+	skel->bss->d_iname[0] = 0;
+}
+
+static int open_pe(void)
+{
+	struct perf_event_attr attr = {0};
+	int pfd;
+
+	/* create perf event */
+	attr.size = sizeof(attr);
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+	attr.freq = 1;
+	attr.sample_freq = 4000;
+	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+
+	return pfd >= 0 ? pfd : -errno;
+}
+
+static void test_find_vma_pe(struct find_vma *skel)
+{
+	struct bpf_link *link = NULL;
+	volatile int j = 0;
+	int pfd, i;
+
+	pfd = open_pe();
+	if (pfd < 0) {
+		if (pfd == -ENOENT || pfd == -EOPNOTSUPP) {
+			printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
+			test__skip();
+			goto cleanup;
+		}
+		if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+			goto cleanup;
+	}
+
+	link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd);
+	if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+		goto cleanup;
+
+	for (i = 0; i < 1000000; ++i)
+		++j;
+
+	test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */);
+cleanup:
+	bpf_link__destroy(link);
+	close(pfd);
+}
+
+static void test_find_vma_kprobe(struct find_vma *skel)
+{
+	int err;
+
+	err = find_vma__attach(skel);
+	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
+		return;
+
+	getpgid(skel->bss->target_pid);
+	test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */);
+}
+
+static void test_illegal_write_vma(void)
+{
+	struct find_vma_fail1 *skel;
+
+	skel = find_vma_fail1__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load"))
+		find_vma_fail1__destroy(skel);
+}
+
+static void test_illegal_write_task(void)
+{
+	struct find_vma_fail2 *skel;
+
+	skel = find_vma_fail2__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load"))
+		find_vma_fail2__destroy(skel);
+}
+
+void serial_test_find_vma(void)
+{
+	struct find_vma *skel;
+
+	skel = find_vma__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load"))
+		return;
+
+	skel->bss->target_pid = getpid();
+	skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
+
+	test_find_vma_pe(skel);
+	usleep(100000); /* allow the irq_work to finish */
+	test_find_vma_kprobe(skel);
+
+	find_vma__destroy(skel);
+	test_illegal_write_vma();
+	test_illegal_write_task();
+}
tools/testing/selftests/bpf/progs/find_vma.c (new file, 69 additions)
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+	int dummy;
+};
+
+#define VM_EXEC		0x00000004
+#define DNAME_INLINE_LEN 32
+
+pid_t target_pid = 0;
+char d_iname[DNAME_INLINE_LEN] = {0};
+__u32 found_vm_exec = 0;
+__u64 addr = 0;
+int find_zero_ret = -1;
+int find_addr_ret = -1;
+
+static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
+		      struct callback_ctx *data)
+{
+	if (vma->vm_file)
+		bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
+					  vma->vm_file->f_path.dentry->d_iname);
+
+	/* check for VM_EXEC */
+	if (vma->vm_flags & VM_EXEC)
+		found_vm_exec = 1;
+
+	return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+	struct callback_ctx data = {};
+
+	if (task->pid != target_pid)
+		return 0;
+
+	find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+	/* this should return -ENOENT */
+	find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+	return 0;
+}
+
+SEC("perf_event")
+int handle_pe(void)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+	struct callback_ctx data = {};
+
+	if (task->pid != target_pid)
+		return 0;
+
+	find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+	/* In NMI, this should return -EBUSY, as the previous call is using
+	 * the irq_work.
+	 */
+	find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+	return 0;
+}
tools/testing/selftests/bpf/progs/find_vma_fail1.c (new file, 29 additions)
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+	int dummy;
+};
+
+static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
+		      struct callback_ctx *data)
+{
+	/* writing to vma, which is illegal */
+	vma->vm_flags |= 0x55;
+
+	return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+	struct callback_ctx data = {};
+
+	bpf_find_vma(task, 0, write_vma, &data, 0);
+	return 0;
+}
tools/testing/selftests/bpf/progs/find_vma_fail2.c (new file, 29 additions)
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+	int dummy;
+};
+
+static long write_task(struct task_struct *task, struct vm_area_struct *vma,
+		       struct callback_ctx *data)
+{
+	/* writing to task, which is illegal */
+	task->mm = NULL;
+
+	return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+	struct callback_ctx data = {};
+
+	bpf_find_vma(task, 0, write_task, &data, 0);
+	return 0;
+}