mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-28 17:41:50 +00:00)
Merge branch 'core/kprobes' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0cc4bd8f70
2 changed files with 45 additions and 25 deletions
kernel/kprobes.c
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
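
Note: the new optprobe_queued_unopt() helper is just a linear membership test over unoptimizing_list. Below is a minimal userspace sketch of the same check, using a hypothetical singly-linked queue instead of the kernel's struct list_head and list_for_each_entry(); the names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an optimized_kprobe queued on a list. */
struct probe {
	const char *name;
	struct probe *next;	/* link used while queued */
};

static struct probe *unoptimizing_queue;	/* head of the pending-unoptimize queue */

/* Same idea as optprobe_queued_unopt(): walk the queue, compare pointers. */
static bool queued_for_unopt(const struct probe *p)
{
	for (const struct probe *it = unoptimizing_queue; it; it = it->next) {
		if (it == p)
			return true;
	}
	return false;
}

int main(void)
{
	struct probe a = { "a", NULL }, b = { "b", NULL };

	unoptimizing_queue = &a;	/* only "a" is queued */

	printf("a queued: %d\n", queued_for_unopt(&a));	/* 1 */
	printf("b queued: %d\n", queued_for_unopt(&b));	/* 0 */
	return 0;
}
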
@@ -633,18 +645,22 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
+	if (!kprobe_optimized(p))
 		return;
-	}
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
+
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}
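
Note: taken together, the optimize_kprobe()/unoptimize_kprobe() hunks make cancellation explicit. A probe sitting on op->list may be queued either for optimizing or for unoptimizing, and optprobe_queued_unopt() now tells the two cases apart instead of inferring them from list_empty() alone. Below is a toy model of the state being kept consistent, with two plain fields standing in for KPROBE_FLAG_OPTIMIZED and the kernel queues; it is an illustrative sketch, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum queue { QUEUE_NONE, QUEUE_OPTIMIZING, QUEUE_UNOPTIMIZING };

struct probe_state {
	bool optimized;		/* mirrors KPROBE_FLAG_OPTIMIZED */
	enum queue queued_on;	/* mirrors which list op->list sits on */
};

/* optimize(): if an unoptimize is still queued, just cancel it. */
static void optimize(struct probe_state *p)
{
	if (p->optimized) {
		if (p->queued_on == QUEUE_UNOPTIMIZING)
			p->queued_on = QUEUE_NONE;	/* dequeue, code stays optimized */
		return;
	}
	p->optimized = true;
	p->queued_on = QUEUE_OPTIMIZING;
}

/* unoptimize(): if an optimize is still queued, cancel it and clear the flag. */
static void unoptimize(struct probe_state *p)
{
	if (!p->optimized)
		return;
	if (p->queued_on == QUEUE_OPTIMIZING) {
		p->queued_on = QUEUE_NONE;
		p->optimized = false;	/* it was never actually optimized */
		return;
	}
	p->queued_on = QUEUE_UNOPTIMIZING;
}

int main(void)
{
	struct probe_state p = { false, QUEUE_NONE };

	optimize(&p);	/* queued for optimizing */
	unoptimize(&p);	/* cancelled before the optimizer ran */
	printf("optimized=%d queued=%d\n", p.optimized, p.queued_on);	/* 0 0 */
	return 0;
}
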
kernel/trace/trace_syscalls.c
@@ -274,7 +274,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
 	struct syscall_trace_enter trace;
 	struct syscall_metadata *meta = call->data;
 	int offset = offsetof(typeof(trace), args);
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	for (i = 0; i < meta->nb_args; i++) {
 		ret = trace_define_field(call, meta->types[i],
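
Note: the trace_syscalls.c hunk pre-initializes ret so that syscall_enter_define_fields() returns 0 rather than an indeterminate value when the loop body never runs (a syscall with zero arguments). Below is a standalone sketch of that hazard; define_field() and define_fields() are hypothetical stand-ins, not the kernel functions.

#include <stdio.h>

/* Hypothetical stand-in for trace_define_field(): may fail per field. */
static int define_field(int i)
{
	(void)i;
	return 0;	/* pretend every field defines cleanly */
}

/* Mirrors the fixed pattern: ret starts at 0, so zero iterations mean success. */
static int define_fields(int nb_args)
{
	int ret = 0;	/* plain "int ret;" would be indeterminate for nb_args == 0 */
	int i;

	for (i = 0; i < nb_args; i++) {
		ret = define_field(i);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	printf("0 args -> %d\n", define_fields(0));	/* 0, not garbage */
	printf("3 args -> %d\n", define_fields(3));	/* 0 */
	return 0;
}
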