Merge branch 'locking/core' into x86/mm, to resolve conflict
There's a non-trivial conflict between the parallel TLB flush framework and the IPI flush debugging code - merge them manually.

Conflicts:
	kernel/smp.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit a500fc918f: 7 changed files with 306 additions and 24 deletions
Documentation/admin-guide/kernel-parameters.txt
@@ -784,6 +784,16 @@
 	cs89x0_media=	[HW,NET]
 			Format: { rj45 | aui | bnc }
 
+	csdlock_debug=	[KNL] Enable debug add-ons of cross-CPU function call
+			handling. When switched on, additional debug data is
+			printed to the console in case a hanging CPU is
+			detected, and that CPU is pinged again in order to try
+			to resolve the hang situation.
+			0: disable csdlock debugging (default)
+			1: enable basic csdlock debugging (minor impact)
+			ext: enable extended csdlock debugging (more impact,
+			     but more data)
+
 	dasd=		[HW,NET]
 			See header of drivers/s390/block/dasd_devmap.c.
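As a usage illustration (not part of the patch; the surrounding parameters are hypothetical examples), the new switch is passed on the kernel boot command line like any other parameter:

    console=ttyS0 csdlock_debug=1      # basic CSD-lock hang reporting
    console=ttyS0 csdlock_debug=ext    # extended reporting, including per-CPU sequence data

It is registered via early_param() in the kernel/smp.c hunk further below, so the value is parsed early during boot, before SMP bring-up.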
arch/x86/include/asm/jump_label.h
@@ -20,7 +20,7 @@
 #include <linux/stringify.h>
 #include <linux/types.h>
 
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
@@ -36,7 +36,7 @@ l_yes:
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
drivers/net/wireless/ath/ath10k/mac.c
@@ -4727,6 +4727,8 @@ out:
 /* Must not be called with conf_mutex held as workers can use that also. */
 void ath10k_drain_tx(struct ath10k *ar)
 {
+	lockdep_assert_not_held(&ar->conf_mutex);
+
 	/* make sure rcu-protected mac80211 tx path itself is drained */
 	synchronize_net();
 
include/linux/lockdep.h
@@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 extern void lock_release(struct lockdep_map *lock, unsigned long ip);
 
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN	-1
+#define LOCK_STATE_NOT_HELD	0
+#define LOCK_STATE_HELD		1
+
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
@@ -301,8 +306,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #define lockdep_assert_held(l)	do {					\
-		WARN_ON(debug_locks && !lockdep_is_held(l));		\
+		WARN_ON(debug_locks &&					\
+			lockdep_is_held(l) == LOCK_STATE_NOT_HELD);	\
+	} while (0)
+
+#define lockdep_assert_not_held(l)	do {				\
+		WARN_ON(debug_locks &&					\
+			lockdep_is_held(l) == LOCK_STATE_HELD);		\
 	} while (0)
 
 #define lockdep_assert_held_write(l)	do {				\
@@ -393,7 +404,8 @@ extern int lockdep_is_held(const void *);
 #define lockdep_is_held_type(l, r)		(1)
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_write(l)	do { (void)(l); } while (0)
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 
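A minimal sketch of how a caller would use the new assertion (illustrative only; the lock and function names below are made up, the ath10k hunk above is the real in-tree user):

	#include <linux/mutex.h>
	#include <linux/lockdep.h>

	static DEFINE_MUTEX(demo_lock);		/* hypothetical lock */

	/* Must not be entered with demo_lock held, e.g. because work
	 * flushed from here takes demo_lock itself. */
	static void demo_drain(void)
	{
		/* Warns under lockdep if the caller holds demo_lock;
		 * with lockdep disabled it is the (void)(l) stub above. */
		lockdep_assert_not_held(&demo_lock);

		/* ... drain work that may acquire demo_lock ... */
	}

Because lock_is_held_type() can now also return LOCK_STATE_UNKNOWN when lockdep is not (or no longer) enabled, both lockdep_assert_held() and lockdep_assert_not_held() compare against an explicit LOCK_STATE_* value instead of treating the result as a plain boolean, so neither assertion fires spuriously when lockdep cannot give a definite answer.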
kernel/locking/lockdep.c
@@ -54,6 +54,7 @@
 #include <linux/nmi.h>
 #include <linux/rcupdate.h>
 #include <linux/kprobes.h>
+#include <linux/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -5252,13 +5253,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read)
 
 		if (match_held_lock(hlock, lock)) {
 			if (read == -1 || hlock->read == read)
-				return 1;
+				return LOCK_STATE_HELD;
 
-			return 0;
+			return LOCK_STATE_NOT_HELD;
 		}
 	}
 
-	return 0;
+	return LOCK_STATE_NOT_HELD;
 }
 
 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
@@ -5537,10 +5538,14 @@ EXPORT_SYMBOL_GPL(lock_release);
 noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret = LOCK_STATE_NOT_HELD;
 
+	/*
+	 * Avoid false negative lockdep_assert_held() and
+	 * lockdep_assert_not_held().
+	 */
 	if (unlikely(!lockdep_enabled()))
-		return 1; /* avoid false negative lockdep_assert_held() */
+		return LOCK_STATE_UNKNOWN;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
kernel/smp.c (274 changed lines)
@@ -24,14 +24,70 @@
 #include <linux/sched/clock.h>
 #include <linux/nmi.h>
 #include <linux/sched/debug.h>
+#include <linux/jump_label.h>
 
 #include "smpboot.h"
 #include "sched/smp.h"
 
 #define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+union cfd_seq_cnt {
+	u64		val;
+	struct {
+		u64	src:16;
+		u64	dst:16;
+#define CFD_SEQ_NOCPU	0xffff
+		u64	type:4;
+#define CFD_SEQ_QUEUE	0
+#define CFD_SEQ_IPI	1
+#define CFD_SEQ_NOIPI	2
+#define CFD_SEQ_PING	3
+#define CFD_SEQ_PINGED	4
+#define CFD_SEQ_HANDLE	5
+#define CFD_SEQ_DEQUEUE	6
+#define CFD_SEQ_IDLE	7
+#define CFD_SEQ_GOTIPI	8
+#define CFD_SEQ_HDLEND	9
+		u64	cnt:28;
+	} u;
+};
+
+static char *seq_type[] = {
+	[CFD_SEQ_QUEUE]		= "queue",
+	[CFD_SEQ_IPI]		= "ipi",
+	[CFD_SEQ_NOIPI]		= "noipi",
+	[CFD_SEQ_PING]		= "ping",
+	[CFD_SEQ_PINGED]	= "pinged",
+	[CFD_SEQ_HANDLE]	= "handle",
+	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
+	[CFD_SEQ_IDLE]		= "idle",
+	[CFD_SEQ_GOTIPI]	= "gotipi",
+	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
+};
+
+struct cfd_seq_local {
+	u64	ping;
+	u64	pinged;
+	u64	handle;
+	u64	dequeue;
+	u64	idle;
+	u64	gotipi;
+	u64	hdlend;
+};
+#endif
+
+struct cfd_percpu {
+	call_single_data_t	csd;
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+	u64	seq_queue;
+	u64	seq_ipi;
+	u64	seq_noipi;
+#endif
+};
+
 struct call_function_data {
-	call_single_data_t	__percpu *csd;
+	struct cfd_percpu	__percpu *pcpu;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -54,8 +110,8 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 		free_cpumask_var(cfd->cpumask);
 		return -ENOMEM;
 	}
-	cfd->csd = alloc_percpu(call_single_data_t);
-	if (!cfd->csd) {
+	cfd->pcpu = alloc_percpu(struct cfd_percpu);
+	if (!cfd->pcpu) {
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
 		return -ENOMEM;
@@ -70,7 +126,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 
 	free_cpumask_var(cfd->cpumask);
 	free_cpumask_var(cfd->cpumask_ipi);
-	free_percpu(cfd->csd);
+	free_percpu(cfd->pcpu);
 	return 0;
 }
 
@@ -102,15 +158,60 @@ void __init call_function_init(void)
 
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
+
+static int __init csdlock_debug(char *str)
+{
+	unsigned int val = 0;
+
+	if (str && !strcmp(str, "ext")) {
+		val = 1;
+		static_branch_enable(&csdlock_debug_extended);
+	} else
+		get_option(&str, &val);
+
+	if (val)
+		static_branch_enable(&csdlock_debug_enabled);
+
+	return 0;
+}
+early_param("csdlock_debug", csdlock_debug);
+
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
+static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
 
 #define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
+static u64 cfd_seq;
+
+#define CFD_SEQ(s, d, t, c)	\
+	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
+
+static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
+{
+	union cfd_seq_cnt new, old;
+
+	new = CFD_SEQ(src, dst, type, 0);
+
+	do {
+		old.val = READ_ONCE(cfd_seq);
+		new.u.cnt = old.u.cnt + 1;
+	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
+
+	return old.val;
+}
+
+#define cfd_seq_store(var, src, dst, type)				\
+	do {								\
+		if (static_branch_unlikely(&csdlock_debug_extended))	\
+			var = cfd_seq_inc(src, dst, type);		\
+	} while (0)
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void csd_lock_record(call_single_data_t *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -125,7 +226,13 @@ static void csd_lock_record(call_single_data_t *csd)
 	/* Or before unlock, as the case may be. */
 }
 
-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled))
+		__csd_lock_record(csd);
+}
+
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -135,12 +242,86 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
 	return -1;
 }
 
+static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
+			     unsigned int type, union cfd_seq_cnt *data,
+			     unsigned int *n_data, unsigned int now)
+{
+	union cfd_seq_cnt new[2];
+	unsigned int i, j, k;
+
+	new[0].val = val;
+	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
+
+	for (i = 0; i < 2; i++) {
+		if (new[i].u.cnt <= now)
+			new[i].u.cnt |= 0x80000000U;
+		for (j = 0; j < *n_data; j++) {
+			if (new[i].u.cnt == data[j].u.cnt) {
+				/* Direct read value trumps generated one. */
+				if (i == 0)
+					data[j].val = new[i].val;
+				break;
+			}
+			if (new[i].u.cnt < data[j].u.cnt) {
+				for (k = *n_data; k > j; k--)
+					data[k].val = data[k - 1].val;
+				data[j].val = new[i].val;
+				(*n_data)++;
+				break;
+			}
+		}
+		if (j == *n_data) {
+			data[j].val = new[i].val;
+			(*n_data)++;
+		}
+	}
+}
+
+static const char *csd_lock_get_type(unsigned int type)
+{
+	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
+}
+
+static void csd_lock_print_extended(call_single_data_t *csd, int cpu)
+{
+	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
+	unsigned int srccpu = csd->node.src;
+	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
+	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+	unsigned int now;
+	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
+	unsigned int n_data = 0, i;
+
+	data[0].val = READ_ONCE(cfd_seq);
+	now = data[0].u.cnt;
+
+	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
+	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
+	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);
+
+	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
+	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
+
+	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
+	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
+	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
+	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
+	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);
+
+	for (i = 0; i < n_data; i++) {
+		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
+			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
+			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
+	}
+	pr_alert("\tcsd: cnt now: %07x\n", now);
+}
+
 /*
  * Complain if too much time spent waiting. Note that only
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
 */
-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -184,6 +365,8 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
 	}
 	if (cpu >= 0) {
+		if (static_branch_unlikely(&csdlock_debug_extended))
+			csd_lock_print_extended(csd, cpu);
 		if (!trigger_single_cpu_backtrace(cpu))
 			dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
@@ -204,7 +387,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -218,7 +401,36 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled)) {
+		__csd_lock_wait(csd);
+		return;
+	}
+
+	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+}
+
+static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
+	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
+	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+
+	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
+	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
+		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
+		send_call_function_single_ipi(cpu);
+		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
+	} else {
+		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
+	}
+}
+
 #else
+#define cfd_seq_store(var, src, dst, type)
+
 static void csd_lock_record(call_single_data_t *csd)
 {
 }
@@ -256,6 +468,19 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+	if (static_branch_unlikely(&csdlock_debug_extended)) {
+		unsigned int type;
+
+		type = CSD_TYPE(container_of(node, call_single_data_t,
+					     node.llist));
+		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
+			__smp_call_single_queue_debug(cpu, node);
+			return;
+		}
+	}
+#endif
+
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -314,6 +539,8 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
 */
 void generic_smp_call_function_single_interrupt(void)
 {
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_GOTIPI);
 	flush_smp_call_function_queue(true);
 }
 
@@ -341,7 +568,13 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	lockdep_assert_irqs_disabled();
 
 	head = this_cpu_ptr(&call_single_queue);
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_HANDLE);
 	entry = llist_del_all(head);
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
+		      /* Special meaning of source cpu: 0 == queue empty */
+		      entry ? CFD_SEQ_NOCPU : 0,
+		      smp_processor_id(), CFD_SEQ_DEQUEUE);
 	entry = llist_reverse_order(entry);
 
 	/* There shouldn't be any pending callbacks on an offline CPU. */
@@ -400,8 +633,12 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		}
 	}
 
-	if (!entry)
+	if (!entry) {
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
+			      0, smp_processor_id(),
+			      CFD_SEQ_HDLEND);
 		return;
+	}
 
 	/*
 	 * Second; run all !SYNC callbacks.
@@ -439,6 +676,9 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 */
 	if (entry)
 		sched_ttwu_pending(entry);
+
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
 void flush_smp_call_function_from_idle(void)
@@ -448,6 +688,8 @@ void flush_smp_call_function_from_idle(void)
 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
 		return;
 
+	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
+		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
 	flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
@@ -667,7 +909,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 		cpumask_clear(cfd->cpumask_ipi);
 		for_each_cpu(cpu, cfd->cpumask) {
-			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
+			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+			call_single_data_t *csd = &pcpu->csd;
 
 			if (cond_func && !cond_func(cpu, info))
 				continue;
@@ -681,13 +924,20 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			csd->node.src = smp_processor_id();
 			csd->node.dst = cpu;
 #endif
+			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
 				last_cpu = cpu;
+
+				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+			} else {
+				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
 			}
 		}
 
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
+
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
 		 * number of CPUs might be zero due to concurrent changes to the
@@ -697,6 +947,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			send_call_function_single_ipi(last_cpu);
 		else if (likely(nr_cpus > 1))
 			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+
+		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
 	}
 
 	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
@@ -711,7 +963,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		for_each_cpu(cpu, cfd->cpumask) {
 			call_single_data_t *csd;
 
-			csd = per_cpu_ptr(cfd->csd, cpu);
+			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
 			csd_lock_wait(csd);
 		}
 	}
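The global sequence counter cfd_seq above is advanced by cfd_seq_inc() with a lock-free cmpxchg() retry loop, and every debug event records the value it observed. A stand-alone C11 sketch of the same retry pattern (illustrative only, not kernel code; the kernel version also re-packs the src/dst/type bitfields on each update):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t seq;

	static uint64_t seq_inc(void)
	{
		uint64_t old, new;

		do {
			old = atomic_load(&seq);
			new = old + 1;	/* kernel version also re-encodes src/dst/type here */
		} while (!atomic_compare_exchange_weak(&seq, &old, new));

		return old;	/* callers keep the pre-update value, as cfd_seq_inc() does */
	}

Keeping the update lock-free matters here because the counter is bumped from IPI send and receive paths that must not block; the cost of a rare retry is far cheaper than a shared lock on every cross-CPU call, and the whole path is compiled out behind the csdlock_debug_extended static branch anyway.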
kernel/static_call.c
@@ -349,7 +349,8 @@ static int static_call_add_module(struct module *mod)
 	struct static_call_site *site;
 
 	for (site = start; site != stop; site++) {
-		unsigned long addr = (unsigned long)static_call_key(site);
+		unsigned long s_key = (long)site->key + (long)&site->key;
+		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
 		unsigned long key;
 
 		/*
@@ -373,8 +374,8 @@ static int static_call_add_module(struct module *mod)
 			return -EINVAL;
 		}
 
-		site->key = (key - (long)&site->key) |
-			    (site->key & STATIC_CALL_SITE_FLAGS);
+		key |= s_key & STATIC_CALL_SITE_FLAGS;
+		site->key = key - (long)&site->key;
 	}
 
 	return __static_call_init(mod, start, stop);
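The static_call hunk fixes the order of operations when re-linking a module's call sites: the low STATIC_CALL_SITE_FLAGS bits have to be folded into the absolute key value before it is turned back into a site-relative offset, otherwise the subtraction can disturb them. A user-space sketch of that "relative pointer with flag bits" encoding (illustrative only; the names and the 2-bit flag mask are assumptions, not the kernel definitions):

	#include <stdint.h>

	#define SITE_FLAGS	0x3UL	/* low bits are free because keys are word-aligned */

	/* Encode: fold the flags into the absolute address first,
	 * then store it relative to the slot's own address. */
	static long site_encode(uintptr_t slot, uintptr_t key, unsigned long flags)
	{
		return (long)((key | flags) - slot);
	}

	/* Decode: rebase on the slot address, then split address and flags. */
	static void site_decode(uintptr_t slot, long rel,
				uintptr_t *key, unsigned long *flags)
	{
		uintptr_t v = slot + (uintptr_t)rel;

		*flags = v & SITE_FLAGS;
		*key   = v & ~SITE_FLAGS;
	}

Folding the flags in before the subtraction is what the "key |= s_key & STATIC_CALL_SITE_FLAGS" line in the hunk above does; the old code ORed the flags into the already-relative value instead.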