Commit 401c636a0e
("kernel/hung_task.c: show all hung tasks before
panic") introduced a change whereby we started to show the backtraces
of all CPUs when a hung task is detected _and_ the sysctl/kernel
parameter "hung_task_panic" is set. The idea is good, because usually
when observing deadlocks (which may lead to hung tasks), the culprit is
another task holding a lock, not necessarily the task detected as
hung.
The problem with this approach is that dumping backtraces is a somewhat
expensive operation, especially when printing them to the console (and
especially on machines with many CPUs, as servers commonly are
nowadays). So users who plan to collect a kdump to investigate the hung
tasks and narrow down the deadlock definitely don't need the CPUs'
backtraces on dmesg/console; they only delay the panic and pollute the
log (the crash tool can easily grab all CPUs' traces with the 'bt -a'
command).
There is also the reciprocal scenario: some users may be interested in
seeing the CPUs' backtraces but not in having the system panic when a
hung task is detected. The current approach hence almost amounts to
embedding a policy in the kernel, by forcing the CPUs' backtrace dump
(only) on hung_task_panic.
This patch decouples the panic event on hung task from the CPUs'
backtrace dump by creating (and documenting) a new sysctl called
"hung_task_all_cpu_backtrace", analogous to the approach taken for
soft/hard lockups, which have both a panic and an "all_cpu_backtrace"
sysctl to allow individual control. The new mechanism for dumping the
CPUs' backtraces on hung task detection respects "hung_task_warnings"
by not dumping the traces when no warnings are left.
Signed-off-by: Guilherme G. Piccoli <gpiccoli@canonical.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Link: http://lkml.kernel.org/r/20200327223646.20779-1-gpiccoli@canonical.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
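
To make the decoupling described above concrete, here is a minimal,
self-contained user-space C sketch (not the actual kernel patch) that
models the decision logic: the all-CPU backtrace dump and the panic are
controlled by two independent knobs, and the dump additionally respects
the remaining "hung_task_warnings" budget. The sysctl variable names
mirror the real ones; report_hung_task(), dump_all_cpu_backtraces() and
trigger_panic() are hypothetical stand-ins for the in-kernel detector
and its helpers.

#include <stdbool.h>
#include <stdio.h>

/* Modeled sysctl knobs; the two behaviors are independent of each other. */
static unsigned int sysctl_hung_task_all_cpu_backtrace = 1;
static unsigned int sysctl_hung_task_panic = 0;
static int sysctl_hung_task_warnings = 10;   /* -1 means unlimited warnings */

/* Stand-in for an all-CPU backtrace trigger (e.g. via NMI in the kernel). */
static void dump_all_cpu_backtraces(void)
{
        printf("dumping backtraces of all CPUs\n");
}

/* Stand-in for panic(). */
static void trigger_panic(const char *msg)
{
        printf("panic: %s\n", msg);
}

/* Called when a task has been detected as hung. */
static void report_hung_task(const char *comm)
{
        bool warn = sysctl_hung_task_warnings != 0;

        if (warn) {
                if (sysctl_hung_task_warnings > 0)
                        sysctl_hung_task_warnings--;
                printf("INFO: task %s blocked for too long\n", comm);

                /* Dump all CPUs only if requested and warnings remain. */
                if (sysctl_hung_task_all_cpu_backtrace)
                        dump_all_cpu_backtraces();
        }

        /* The panic decision is taken independently of the dump. */
        if (sysctl_hung_task_panic)
                trigger_panic("hung_task: blocked tasks");
}

int main(void)
{
        report_hung_task("kworker/0:1");
        return 0;
}

With both knobs exposed separately, a kdump-oriented setup can enable
hung_task_panic while leaving hung_task_all_cpu_backtrace off, and a
debugging setup can do the opposite.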
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SYSCTL_H
#define _LINUX_SCHED_SYSCTL_H

#include <linux/types.h>

struct ctl_table;

#ifdef CONFIG_DETECT_HUNG_TASK

#ifdef CONFIG_SMP
extern unsigned int sysctl_hung_task_all_cpu_backtrace;
#else
#define sysctl_hung_task_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_check_interval_secs;
extern int sysctl_hung_task_warnings;
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                  void *buffer, size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,
        SCHED_TUNABLESCALING_LOG,
        SCHED_TUNABLESCALING_LINEAR,
        SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;

int sched_proc_update_handler(struct ctl_table *table, int write,
                              void *buffer, size_t *length, loff_t *ppos);
#endif

/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

#ifdef CONFIG_UCLAMP_TASK
extern unsigned int sysctl_sched_uclamp_util_min;
extern unsigned int sysctl_sched_uclamp_util_max;
#endif

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif

extern int sysctl_sched_rr_timeslice;
extern int sched_rr_timeslice;

int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
                     size_t *lenp, loff_t *ppos);
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
                     size_t *lenp, loff_t *ppos);
int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos);
int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,
                          size_t *lenp, loff_t *ppos);
int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
                      size_t *lenp, loff_t *ppos);

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern unsigned int sysctl_sched_energy_aware;
int sched_energy_aware_handler(struct ctl_table *table, int write,
                               void *buffer, size_t *lenp, loff_t *ppos);
#endif

#endif /* _LINUX_SCHED_SYSCTL_H */