Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-03-17 20:54:10 +00:00)
signal: Add task_sigpending() helper
This is in preparation for maintaining signal_pending() as the decider of whether or not a schedule() loop should be broken, or continue sleeping. This is different than the core signal use cases, which really need to know whether an actual signal is pending or not. task_sigpending() returns non-zero if TIF_SIGPENDING is set.

Only core kernel use cases should care about the distinction between the two; make sure those use the task_sigpending() helper.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20201026203230.386348-2-axboe@kernel.dk
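For quick reference, this is how the affected helpers in include/linux/sched/signal.h read once the patch below is applied; it is a reconstruction assembled from the diff hunks, with the surrounding declarations elided.

/*
 * Reconstructed from the include/linux/sched/signal.h hunks below:
 * the helpers as they stand after this patch (context elided).
 */
static inline int task_sigpending(struct task_struct *p)
{
        /* "Is TIF_SIGPENDING set?" - i.e. real signal state to look at. */
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
        /* For now a plain wrapper; kept as the "break this sleep?" check. */
        return task_sigpending(p);
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return task_sigpending(p) && __fatal_signal_pending(p);
}

signal_pending() stays a thin wrapper so existing sleepers keep their behavior, while task_sigpending() gives core signal code a direct view of TIF_SIGPENDING.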
parent 9123e3a74e
commit 5c251e9dc0

3 changed files with 12 additions and 7 deletions
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -353,11 +353,16 @@ static inline int restart_syscall(void)
        return -ERESTARTNOINTR;
 }
 
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
 {
        return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
+static inline int signal_pending(struct task_struct *p)
+{
+       return task_sigpending(p);
+}
+
 static inline int __fatal_signal_pending(struct task_struct *p)
 {
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -365,7 +370,7 @@ static inline int __fatal_signal_pending(struct task_struct *p)
 
 static inline int fatal_signal_pending(struct task_struct *p)
 {
-       return signal_pending(p) && __fatal_signal_pending(p);
+       return task_sigpending(p) && __fatal_signal_pending(p);
 }
 
 static inline int signal_pending_state(long state, struct task_struct *p)
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1973,7 +1973,7 @@ bool uprobe_deny_signal(void)
 
        WARN_ON_ONCE(utask->state != UTASK_SSTEP);
 
-       if (signal_pending(t)) {
+       if (task_sigpending(t)) {
                spin_lock_irq(&t->sighand->siglock);
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
                spin_unlock_irq(&t->sighand->siglock);
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -983,7 +983,7 @@ static inline bool wants_signal(int sig, struct task_struct *p)
        if (task_is_stopped_or_traced(p))
                return false;
 
-       return task_curr(p) || !signal_pending(p);
+       return task_curr(p) || !task_sigpending(p);
 }
 
 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
@@ -2822,7 +2822,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
                /* Remove the signals this thread can handle. */
                sigandsets(&retarget, &retarget, &t->blocked);
 
-               if (!signal_pending(t))
+               if (!task_sigpending(t))
                        signal_wake_up(t, 0);
 
                if (sigisemptyset(&retarget))
@@ -2856,7 +2856,7 @@ void exit_signals(struct task_struct *tsk)
 
        cgroup_threadgroup_change_end(tsk);
 
-       if (!signal_pending(tsk))
+       if (!task_sigpending(tsk))
                goto out;
 
        unblocked = tsk->blocked;
@@ -2900,7 +2900,7 @@ long do_no_restart_syscall(struct restart_block *param)
 
 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
 {
-       if (signal_pending(tsk) && !thread_group_empty(tsk)) {
+       if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
                sigset_t newblocked;
                /* A set of now blocked but previously unblocked signals. */
                sigandnsets(&newblocked, newset, &current->blocked);
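To make the intended split concrete, here is a minimal sketch, not taken from this patch: a typical interruptible sleep keeps keying off signal_pending(), while core signal code asks the narrower task_sigpending() question, as in the retarget_shared_pending() hunk above. The example_wait() and example_retarget() helpers, the work_done flag and the -ERESTARTSYS return value are illustrative assumptions, not code changed by this commit.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* Illustrative sleeper: signal_pending() remains the "should this sleep be
 * broken?" test, which later patches may extend beyond queued signals. */
static int example_wait(bool *work_done)        /* hypothetical helper */
{
        int ret = 0;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (READ_ONCE(*work_done))
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        return ret;
}

/* Core-signal-style caller: task_sigpending() reports actual TIF_SIGPENDING
 * state, mirroring the retarget_shared_pending() change above. */
static void example_retarget(struct task_struct *t)    /* hypothetical helper */
{
        if (!task_sigpending(t))
                signal_wake_up(t, 0);
}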