Date: 2024-04-10 22:54:10
From: Thomas Gleixner
Subject: [patch V2 36/50] signal: Split up __sigqueue_alloc()

To cure the SIG_IGN handling for posix interval timers, the preallocated
sigqueue needs to be embedded into struct k_itimer to prevent lifetime
races of all sorts.
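
As a rough illustration of the end state this is heading towards (a
hypothetical sketch, not part of this patch; the actual struct k_itimer
layout in include/linux/posix-timers.h will differ), the separately
allocated queue which the timer currently only points to becomes an
in-place member that shares the timer's lifetime:

	/* Hypothetical sketch only - member name and placement are assumptions */
	struct k_itimer {
		/* ... existing timer state ... */
		struct sigqueue	sigq;	/* embedded: allocated and freed with the timer */
		/* ... */
	};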

Reorganize __sigqueue_alloc() so that the ucounts retrieval and the sigqueue
initialization become separate helpers, sig_get_ucounts() and
__sigqueue_init(), which can be used independently.
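
To illustrate why the split helps (a hypothetical sketch, not part of this
patch; the function name is an assumption, while the -1 signal number, the
zero override_rlimit and SIGQUEUE_PREALLOC mirror what the existing
sigqueue_alloc() wrapper passes to __sigqueue_alloc() today), a later caller
which provides its own sigqueue storage can charge the RLIMIT_SIGPENDING
accounting via sig_get_ucounts() and set the queue up via __sigqueue_init()
without ever calling kmem_cache_alloc():

	/* Hypothetical caller sketch - not part of this patch */
	static int posixtimer_init_sigqueue(struct sigqueue *q)
	{
		struct ucounts *ucounts;

		ucounts = sig_get_ucounts(current, -1, 0);
		if (!ucounts)
			return -EAGAIN;
		__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
		return 0;
	}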

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
---
kernel/signal.c | 52 +++++++++++++++++++++++++++++++++++-----------------
1 file changed, 35 insertions(+), 17 deletions(-)

--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -396,16 +396,9 @@ void task_join_group_stop(struct task_st
 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
-/*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- *   appropriate lock must be held to stop the target task from exiting
- */
-static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
-		 int override_rlimit, const unsigned int sigqueue_flags)
+static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
+				       int override_rlimit)
 {
-	struct sigqueue *q = NULL;
 	struct ucounts *ucounts;
 	long sigpending;
 
@@ -424,19 +417,44 @@ static struct sigqueue *
 	if (!sigpending)
 		return NULL;
 
-	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
-	} else {
+	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
+		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 		print_dropped_signal(sig);
+		return NULL;
 	}
 
-	if (unlikely(q == NULL)) {
+	return ucounts;
+}
+
+static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
+			    const unsigned int sigqueue_flags)
+{
+	INIT_LIST_HEAD(&q->list);
+	q->flags = sigqueue_flags;
+	q->ucounts = ucounts;
+}
+
+/*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ *   appropriate lock must be held to stop the target task from exiting
+ */
+static struct sigqueue *__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
+					 int override_rlimit, const unsigned int sigqueue_flags)
+{
+	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
+	struct sigqueue *q;
+
+	if (!ucounts)
+		return NULL;
+
+	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
+	if (!q) {
 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
-	} else {
-		INIT_LIST_HEAD(&q->list);
-		q->flags = sigqueue_flags;
-		q->ucounts = ucounts;
+		return NULL;
 	}
+
+	__sigqueue_init(q, ucounts, sigqueue_flags);
 	return q;
 }