signals: move the definition of __group_complete_signal() up
Move the unchanged definition of __group_complete_signal() up so that send_signal() can see it; this simplifies the reading of the next patches.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
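As a side note, the reason the definition must sit above its caller is the usual C visibility rule: a static function has to be defined (or forward-declared) before the point of the call. The following minimal sketch is not kernel code; helper() and send_signal_like() are made-up names used only to illustrate the ordering constraint that moving __group_complete_signal() up satisfies without adding a prototype.

/* Standalone illustration, not from kernel/signal.c. */
#include <stdio.h>

/* The helper is defined before its caller, mirroring the reordering. */
static void helper(int sig)
{
	printf("completing delivery of signal %d\n", sig);
}

/* The caller can now "see" helper() without a forward declaration. */
static void send_signal_like(int sig)
{
	helper(sig);
}

int main(void)
{
	send_signal_like(9);
	return 0;
}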
parent db51aeccd7
commit 71f11dc025

1 changed file with 96 additions and 96 deletions
kernel/signal.c | 192 (96 additions, 96 deletions)
@@ -652,6 +652,102 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 	}
 }
 
+/*
+ * Test if P wants to take SIG. After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG. Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals. Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (task_is_stopped_or_traced(p))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
+
+static void
+__group_complete_signal(int sig, struct task_struct *p)
+{
+	struct signal_struct *signal = p->signal;
+	struct task_struct *t;
+
+	/*
+	 * Now find a thread we can wake up to take the signal off the queue.
+	 *
+	 * If the main thread wants the signal, it gets first crack.
+	 * Probably the least surprising to the average bear.
+	 */
+	if (wants_signal(sig, p))
+		t = p;
+	else if (thread_group_empty(p))
+		/*
+		 * There is just one thread and it does not need to be woken.
+		 * It will dequeue unblocked signals before it runs again.
+		 */
+		return;
+	else {
+		/*
+		 * Otherwise try to find a suitable thread.
+		 */
+		t = signal->curr_target;
+		while (!wants_signal(sig, t)) {
+			t = next_thread(t);
+			if (t == signal->curr_target)
+				/*
+				 * No thread needs to be woken.
+				 * Any eligible threads will see
+				 * the signal in the queue soon.
+				 */
+				return;
+		}
+		signal->curr_target = t;
+	}
+
+	/*
+	 * Found a killable thread. If the signal will be fatal,
+	 * then start taking the whole group down immediately.
+	 */
+	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
+	    !sigismember(&t->real_blocked, sig) &&
+	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+		/*
+		 * This signal will be fatal to the whole group.
+		 */
+		if (!sig_kernel_coredump(sig)) {
+			/*
+			 * Start a group exit and wake everybody up.
+			 * This way we don't have other threads
+			 * running and doing things after a slower
+			 * thread has the fatal signal pending.
+			 */
+			signal->flags = SIGNAL_GROUP_EXIT;
+			signal->group_exit_code = sig;
+			signal->group_stop_count = 0;
+			t = p;
+			do {
+				sigaddset(&t->pending.signal, SIGKILL);
+				signal_wake_up(t, 1);
+			} while_each_thread(p, t);
+			return;
+		}
+	}
+
+	/*
+	 * The signal is already in the shared-pending queue.
+	 * Tell the chosen thread to wake up and dequeue it.
+	 */
+	signal_wake_up(t, sig == SIGKILL);
+	return;
+}
+
 static inline int legacy_queue(struct sigpending *signals, int sig)
 {
 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
@@ -817,102 +913,6 @@ force_sig_specific(int sig, struct task_struct *t)
 	force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-/*
- * Test if P wants to take SIG. After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG. Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals. Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-	if (sigismember(&p->blocked, sig))
-		return 0;
-	if (p->flags & PF_EXITING)
-		return 0;
-	if (sig == SIGKILL)
-		return 1;
-	if (task_is_stopped_or_traced(p))
-		return 0;
-	return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-	struct signal_struct *signal = p->signal;
-	struct task_struct *t;
-
-	/*
-	 * Now find a thread we can wake up to take the signal off the queue.
-	 *
-	 * If the main thread wants the signal, it gets first crack.
-	 * Probably the least surprising to the average bear.
-	 */
-	if (wants_signal(sig, p))
-		t = p;
-	else if (thread_group_empty(p))
-		/*
-		 * There is just one thread and it does not need to be woken.
-		 * It will dequeue unblocked signals before it runs again.
-		 */
-		return;
-	else {
-		/*
-		 * Otherwise try to find a suitable thread.
-		 */
-		t = signal->curr_target;
-		while (!wants_signal(sig, t)) {
-			t = next_thread(t);
-			if (t == signal->curr_target)
-				/*
-				 * No thread needs to be woken.
-				 * Any eligible threads will see
-				 * the signal in the queue soon.
-				 */
-				return;
-		}
-		signal->curr_target = t;
-	}
-
-	/*
-	 * Found a killable thread. If the signal will be fatal,
-	 * then start taking the whole group down immediately.
-	 */
-	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
-	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-		/*
-		 * This signal will be fatal to the whole group.
-		 */
-		if (!sig_kernel_coredump(sig)) {
-			/*
-			 * Start a group exit and wake everybody up.
-			 * This way we don't have other threads
-			 * running and doing things after a slower
-			 * thread has the fatal signal pending.
-			 */
-			signal->flags = SIGNAL_GROUP_EXIT;
-			signal->group_exit_code = sig;
-			signal->group_stop_count = 0;
-			t = p;
-			do {
-				sigaddset(&t->pending.signal, SIGKILL);
-				signal_wake_up(t, 1);
-			} while_each_thread(p, t);
-			return;
-		}
-	}
-
-	/*
-	 * The signal is already in the shared-pending queue.
-	 * Tell the chosen thread to wake up and dequeue it.
-	 */
-	signal_wake_up(t, sig == SIGKILL);
-	return;
-}
-
 int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {