ftrace: Fix some typos in comment

s/coorditate/coordinate/
s/emty/empty/
s/preeptive/preemptive/
s/succes/success/
s/carefule/careful/

Link: https://lkml.kernel.org/r/20201002143126.2890-1-hqjagain@gmail.com

Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Author:    Qiujun Huang (2020-10-02 22:31:26 +08:00)
Committer: Steven Rostedt (VMware)
Parent:    fdb46faeab
Commit:    fdda88d31a

kernel/trace/ftrace.c

@@ -230,7 +230,7 @@ static void update_ftrace_function(void)
 	/*
 	 * For static tracing, we need to be a bit more careful.
 	 * The function change takes affect immediately. Thus,
-	 * we need to coorditate the setting of the function_trace_ops
+	 * we need to coordinate the setting of the function_trace_ops
 	 * with the setting of the ftrace_trace_function.
 	 *
 	 * Set the function to the list ops, which will call the
@@ -1451,7 +1451,7 @@ static bool hash_contains_ip(unsigned long ip,
 {
 	/*
 	 * The function record is a match if it exists in the filter
-	 * hash and not in the notrace hash. Note, an emty hash is
+	 * hash and not in the notrace hash. Note, an empty hash is
 	 * considered a match for the filter hash, but an empty
 	 * notrace hash is considered not in the notrace hash.
 	 */
@@ -2976,7 +2976,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		synchronize_rcu_tasks_rude();

 		/*
-		 * When the kernel is preeptive, tasks can be preempted
+		 * When the kernel is preemptive, tasks can be preempted
 		 * while on a ftrace trampoline. Just scheduling a task on
 		 * a CPU is not good enough to flush them. Calling
 		 * synchornize_rcu_tasks() will wait for those tasks to
@@ -4368,7 +4368,7 @@ void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
  * @ip: The instruction pointer address to map @data to
  * @data: The data to map to @ip
  *
- * Returns 0 on succes otherwise an error.
+ * Returns 0 on success otherwise an error.
  */
 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
 			      unsigned long ip, void *data)
@@ -4536,7 +4536,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,

 	/*
 	 * Note, there's a small window here that the func_hash->filter_hash
-	 * may be NULL or empty. Need to be carefule when reading the loop.
+	 * may be NULL or empty. Need to be careful when reading the loop.
 	 */
 	mutex_lock(&probe->ops.func_hash->regex_lock);
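
As a side note on the hash_contains_ip() hunk above: the rule its comment states can be illustrated with a small standalone C sketch. This is a simplification for illustration only, not the kernel's code; it uses a plain array in place of struct ftrace_hash, and the names ip_set, set_contains and toy_hash_contains_ip are made up. What it shows is the asymmetry the comment describes: an empty filter set matches every address, while an empty notrace set excludes nothing.

/* toy_hash_match.c - illustration only; not the kernel's ftrace hash code */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ip_set {			/* hypothetical stand-in for a filter/notrace hash */
	const unsigned long *ips;
	size_t count;		/* count == 0 means the set is "empty" */
};

static bool set_contains(const struct ip_set *set, unsigned long ip)
{
	for (size_t i = 0; i < set->count; i++)
		if (set->ips[i] == ip)
			return true;
	return false;
}

/*
 * Same rule as the comment in the hunk: a record matches if it is in the
 * filter set (an empty filter set matches everything) and is NOT in the
 * notrace set (an empty notrace set rejects nothing).
 */
static bool toy_hash_contains_ip(unsigned long ip,
				 const struct ip_set *filter,
				 const struct ip_set *notrace)
{
	bool in_filter  = filter->count == 0  || set_contains(filter, ip);
	bool in_notrace = notrace->count != 0 && set_contains(notrace, ip);

	return in_filter && !in_notrace;
}

int main(void)
{
	const unsigned long traced[] = { 0x1000, 0x2000 };
	struct ip_set filter = { traced, 2 };
	struct ip_set empty  = { NULL, 0 };

	printf("%d\n", toy_hash_contains_ip(0x1000, &filter, &empty)); /* 1: in filter */
	printf("%d\n", toy_hash_contains_ip(0x3000, &filter, &empty)); /* 0: not in filter */
	printf("%d\n", toy_hash_contains_ip(0x3000, &empty,  &empty)); /* 1: empty filter matches all */
	return 0;
}

The kernel makes the same decision with ftrace_hash_empty() and __ftrace_lookup_ip() on the ops' filter_hash and notrace_hash.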