This tightens up __dequeue_signal a little. It also avoids doing
recalc_sigpending twice in a row, instead doing it once in dequeue_signal.
Signed-off-by: Roland McGrath <[email protected]>
---
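For reference, the flow after this patch looks roughly like the sketch below. It is a simplified illustration only: the notifier check and the stop-signal bookkeeping are elided, and the first __dequeue_signal() call on tsk->pending as well as the exact dequeue_signal() prototype are assumed from the surrounding code rather than taken from the hunks in this patch.

        static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                                    siginfo_t *info)
        {
                int sig = next_signal(pending, mask);

                if (sig) {
                        /* notifier check elided */
                        if (!collect_signal(sig, pending, info))
                                sig = 0;
                }
                /* no recalc_sigpending() here any more */
                return sig;
        }

        int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
        {
                int signr = __dequeue_signal(&tsk->pending, mask, info); /* assumed call site */

                if (!signr)
                        signr = __dequeue_signal(&tsk->signal->shared_pending,
                                                 mask, info);
                recalc_sigpending_tsk(tsk);     /* recomputed once, after both queues */
                /* sig_kernel_stop() handling etc. continues unchanged */
                return signr;
        }

Either way, dequeue_signal() now recomputes the task's pending state exactly once per call, instead of once per queue scanned inside __dequeue_signal().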
kernel/signal.c | 30 ++++++++++++++----------------
1 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index bfdb568..bae6155 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -417,9 +417,8 @@ static int collect_signal(int sig, struc
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                         siginfo_t *info)
 {
-        int sig = 0;
+        int sig = next_signal(pending, mask);
 
-        sig = next_signal(pending, mask);
         if (sig) {
                 if (current->notifier) {
                         if (sigismember(current->notifier_mask, sig)) {
@@ -432,9 +431,7 @@ static int __dequeue_signal(struct sigpe
 
                 if (!collect_signal(sig, pending, info))
                         sig = 0;
-
         }
-        recalc_sigpending();
 
         return sig;
 }
@@ -451,6 +448,7 @@ int dequeue_signal(struct task_struct *t
         if (!signr)
                 signr = __dequeue_signal(&tsk->signal->shared_pending,
                                          mask, info);
+        recalc_sigpending_tsk(tsk);
         if (signr && unlikely(sig_kernel_stop(signr))) {
                 /*
                  * Set a marker that we have dequeued a stop signal. Our