bpf: Fix spelling mistakes
Fix some spelling mistakes in comments:
aother ==> another
Netiher ==> Neither
desribe ==> describe
intializing ==> initializing
funciton ==> function
wont ==> won't and move the word 'the' at the end to the next line
accross ==> across
pathes ==> paths
triggerred ==> triggered
excute ==> execute
ether ==> either
conervative ==> conservative
convetion ==> convention
markes ==> marks
interpeter ==> interpreter

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210525025659.8898-2-thunder.leizhen@huawei.com
parent 4ce7d68beb
commit 8fb33b6055
8 changed files with 18 additions and 18 deletions
@@ -58,7 +58,7 @@ struct bpf_local_storage_data {
  * from the object's bpf_local_storage.
  *
  * Put it in the same cacheline as the data to minimize
- * the number of cachelines access during the cache hit case.
+ * the number of cachelines accessed during the cache hit case.
  */
 struct bpf_local_storage_map __rcu *smap;
 u8 data[] __aligned(8);
@@ -71,7 +71,7 @@ struct bpf_local_storage_elem {
 struct bpf_local_storage __rcu *local_storage;
 struct rcu_head rcu;
 /* 8 bytes hole */
- /* The data is stored in aother cacheline to minimize
+ /* The data is stored in another cacheline to minimize
  * the number of cachelines access during a cache hit.
  */
 struct bpf_local_storage_data sdata ____cacheline_aligned;
@@ -72,7 +72,7 @@ void bpf_inode_storage_free(struct inode *inode)
 return;
 }

- /* Netiher the bpf_prog nor the bpf-map's syscall
+ /* Neither the bpf_prog nor the bpf-map's syscall
  * could be modifying the local_storage->list now.
  * Thus, no elem can be added-to or deleted-from the
  * local_storage->list by the bpf_prog or by the bpf-map's syscall.
@@ -51,7 +51,7 @@
  * The BTF type section contains a list of 'struct btf_type' objects.
  * Each one describes a C type. Recall from the above section
  * that a 'struct btf_type' object could be immediately followed by extra
- * data in order to desribe some particular C types.
+ * data in order to describe some particular C types.
  *
  * type_id:
  * ~~~~~~~
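
The comment fixed above sits in the long header describing the BTF type section. For context, that same section can be walked from userspace with libbpf; a minimal sketch, assuming libbpf >= 0.6 (for btf__type_cnt()) and a kernel that exposes /sys/kernel/btf/vmlinux:

#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();	/* parses the kernel's BTF blob */

	if (!btf)
		return 1;

	/* type_id 0 is reserved for 'void'; real entries start at 1 */
	for (__u32 id = 1; id < btf__type_cnt(btf); id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		printf("type_id %u: kind %u, name %s\n", id,
		       (unsigned int)btf_kind(t),
		       btf__name_by_offset(btf, t->name_off));
	}

	btf__free(btf);
	return 0;
}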
@@ -1143,7 +1143,7 @@ static void *btf_show_obj_safe(struct btf_show *show,

 /*
  * We need a new copy to our safe object, either because we haven't
- * yet copied and are intializing safe data, or because the data
+ * yet copied and are initializing safe data, or because the data
  * we want falls outside the boundaries of the safe object.
  */
 if (!safe) {
@@ -3417,7 +3417,7 @@ static struct btf_kind_operations func_proto_ops = {
  * BTF_KIND_FUNC_PROTO cannot be directly referred by
  * a struct's member.
  *
- * It should be a funciton pointer instead.
+ * It should be a function pointer instead.
  * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
  *
  * Hence, there is no btf_func_check_member().
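
In plain C the rule above mirrors the language itself: a struct member may be a pointer to a function, never a bare function type. An illustrative (hypothetical) declaration:

struct ops {
	/* member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO: valid */
	int (*handler)(void *ctx);

	/* A bare function type member is not valid C, which is why a
	 * BTF_KIND_FUNC_PROTO can never be referred to directly by a member:
	 *
	 *	int handler2(void *ctx);
	 */
};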
@@ -382,8 +382,8 @@ void __dev_flush(void)
 }

 /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
- * update happens in parallel here a dev_put wont happen until after reading the
- * ifindex.
+ * update happens in parallel here a dev_put won't happen until after reading
+ * the ifindex.
  */
 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
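
For context, a simplified caller-side sketch of the guarantee described in this comment. The names follow the devmap code (struct bpf_dtab_netdev carries a struct net_device *dev), but this is an illustration, not a verbatim excerpt:

int ifindex = 0;
struct bpf_dtab_netdev *obj;

rcu_read_lock();
/* A parallel delete/update may unlink the entry, but RCU defers the
 * dev_put() (and hence netdev teardown) until after the read-side
 * critical section ends, so the ifindex read below stays safe.
 */
obj = __dev_map_lookup_elem(map, key);
if (obj)
	ifindex = obj->dev->ifindex;
rcu_read_unlock();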
@@ -46,12 +46,12 @@
  * events, kprobes and tracing to be invoked before the prior invocation
  * from one of these contexts completed. sys_bpf() uses the same mechanism
  * by pinning the task to the current CPU and incrementing the recursion
- * protection accross the map operation.
+ * protection across the map operation.
  *
  * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
  * operations like memory allocations (even with GFP_ATOMIC) from atomic
  * contexts. This is required because even with GFP_ATOMIC the memory
- * allocator calls into code pathes which acquire locks with long held lock
+ * allocator calls into code paths which acquire locks with long held lock
  * sections. To ensure the deterministic behaviour these locks are regular
  * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
  * true atomic contexts on an RT kernel are the low level hardware
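
The recursion protection referred to here is the per-CPU bpf_prog_active counter. A condensed sketch of the helpers sys_bpf() wraps around map operations (simplified from include/linux/bpf.h of this era; the real code additionally special-cases PREEMPT_RT):

static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();		/* pin the task to the current CPU */
	this_cpu_inc(bpf_prog_active);	/* recursion protection across the map op */
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}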
@@ -102,7 +102,7 @@ static void reuseport_array_free(struct bpf_map *map)
 /*
  * ops->map_*_elem() will not be able to access this
  * array now. Hence, this function only races with
- * bpf_sk_reuseport_detach() which was triggerred by
+ * bpf_sk_reuseport_detach() which was triggered by
  * close() or disconnect().
  *
  * This function and bpf_sk_reuseport_detach() are
@@ -552,7 +552,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
  * __bpf_prog_enter returns:
  * 0 - skip execution of the bpf prog
  * 1 - execute bpf prog
- * [2..MAX_U64] - excute bpf prog and record execution time.
+ * [2..MAX_U64] - execute bpf prog and record execution time.
  *     This is start time.
  */
 u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
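
In pseudo-C, the generated trampoline consumes this return contract roughly as follows (a sketch of the emitted call sequence, not actual kernel code):

u64 start = __bpf_prog_enter(prog);

/* start == 0: recursion detected, skip the program entirely */
if (start) {
	prog->bpf_func(ctx, prog->insnsi);	/* run the BPF program */

	/* for start >= 2, __bpf_prog_exit() treats 'start' as the
	 * timestamp from which to record the execution time */
	__bpf_prog_exit(prog, start);
}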
@@ -47,7 +47,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
  * - unreachable insns exist (shouldn't be a forest. program = one function)
  * - out of bounds or malformed jumps
  * The second pass is all possible path descent from the 1st insn.
- * Since it's analyzing all pathes through the program, the length of the
+ * Since it's analyzing all paths through the program, the length of the
  * analysis is limited to 64k insn, which may be hit even if total number of
  * insn is less then 4K, but there are too many branches that change stack/regs.
  * Number of 'branches to be analyzed' is limited to 1k
@@ -132,7 +132,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
  * If it's ok, then verifier allows this BPF_CALL insn and looks at
  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
- * returns ether pointer to map value or NULL.
+ * returns either pointer to map value or NULL.
  *
  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
  * insn, the register holding that pointer in the true branch changes state to
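
From the program author's side this state transition is the familiar mandatory NULL check. A minimal, self-contained example (map, program, and section names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	__u32 key = 0;
	/* R0 is PTR_TO_MAP_VALUE_OR_NULL right after the call ... */
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	/* ... and only in the taken branch of this test does it become
	 * PTR_TO_MAP_VALUE; dereferencing without the check is rejected. */
	if (val)
		__sync_fetch_and_add(val, 1);

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";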
@@ -2616,7 +2616,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 if (dst_reg != BPF_REG_FP) {
 /* The backtracking logic can only recognize explicit
  * stack slot address like [fp - 8]. Other spill of
- * scalar via different register has to be conervative.
+ * scalar via different register has to be conservative.
  * Backtrack from here and mark all registers as precise
  * that contributed into 'reg' being a constant.
  */
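
In verifier-log notation the distinction drawn here looks roughly like this (illustrative instruction sequence):

r0 = 42
*(u64 *)(r10 - 8) = r0	/* spill via the frame pointer: backtracking can follow it */

r6 = r10
r6 += -16
*(u64 *)(r6 + 0) = r0	/* spill via r6, i.e. dst_reg != BPF_REG_FP: the verifier
			 * falls back to conservatively marking all regs precise */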
@@ -9053,7 +9053,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 !prog->aux->attach_func_proto->type)
 return 0;

- /* eBPF calling convetion is such that R0 is used
+ /* eBPF calling convention is such that R0 is used
  * to return the value from eBPF program.
  * Make sure that it's readable at this time
  * of bpf_exit, which means that program wrote
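
Concretely, returning via R0 means every accepted program ends with a write to r0 followed by exit. A minimal example (program name illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;	/* compiles to roughly: r0 = 2; exit */
}

char LICENSE[] SEC("license") = "GPL";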
@@ -9844,7 +9844,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
  * Since the verifier pushes the branch states as it sees them while exploring
  * the program the condition of walking the branch instruction for the second
  * time means that all states below this branch were already explored and
- * their final liveness markes are already propagated.
+ * their final liveness marks are already propagated.
  * Hence when the verifier completes the search of state list in is_state_visited()
  * we can call this clean_live_states() function to mark all liveness states
  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
@@ -12464,7 +12464,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 prog->aux->max_pkt_offset = MAX_PACKET_OFF;

 /* mark bpf_tail_call as different opcode to avoid
- * conditional branch in the interpeter for every normal
+ * conditional branch in the interpreter for every normal
  * call and to prevent accidental JITing by JIT compiler
  * that doesn't support bpf_tail_call yet
  */
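
For context, bpf_tail_call() is the helper this fixup special-cases. Typical usage from a program (map and program names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* On success this never returns: execution continues in the target
	 * program. The interpreter/JIT treats it as a distinct opcode rather
	 * than a normal helper call, as the comment above explains. */
	bpf_tail_call(ctx, &jmp_table, 0);

	/* Reached only if the tail call failed (e.g. empty slot). */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";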