Date: 2022-03-24 17:36:11
From: Rasmus Villemoes
Subject: [RFC PATCH 1/2] workqueue: allow use of realtime scheduling policies

Prepare for allowing the administrator to set RT scheduling policies
for workqueues exposed via sysfs, rather than just tweaking their nice
value. Subsystems that currently use, say, system_unbound_wq, can be
updated to create a separate workqueue (possibly depending on a
CONFIG_ knob or boot-time parameter).
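
For illustration, such a subsystem might do something along these
lines (only a sketch; the foo_* names and the CONFIG_FOO_RT_WQ knob
are made up and not part of this series):

  static struct workqueue_struct *foo_wq;

  static int __init foo_wq_init(void)
  {
  	if (IS_ENABLED(CONFIG_FOO_RT_WQ)) {
  		/* Dedicated unbound workqueue, tunable via sysfs. */
  		foo_wq = alloc_workqueue("foo", WQ_UNBOUND | WQ_SYSFS, 0);
  	} else {
  		/* Otherwise keep sharing the system-wide pool. */
  		foo_wq = system_unbound_wq;
  	}
  	return foo_wq ? 0 : -ENOMEM;
  }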

This patch merely updates the internal interfaces. The next patch will
expose a sysfs knob.
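
With the ->policy/->priority fields in place, an in-kernel user could
in principle also program an unbound workqueue directly via
apply_workqueue_attrs(). Again only a sketch (foo_wq stands for some
unbound workqueue as above, and the priority value is arbitrary);
nothing in this series adds such a caller:

  struct workqueue_attrs *attrs = alloc_workqueue_attrs();

  if (attrs) {
  	attrs->policy = SCHED_FIFO;
  	attrs->priority = 10;	/* valid range for SCHED_FIFO/SCHED_RR is 1..99 */
  	cpus_read_lock();
  	apply_workqueue_attrs(foo_wq, attrs);
  	cpus_read_unlock();
  	free_workqueue_attrs(attrs);
  }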

Signed-off-by: Rasmus Villemoes <[email protected]>
---
 include/linux/workqueue.h | 17 +++++++++++++++--
 kernel/workqueue.c        | 20 ++++++++++++++++++--
 2 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 7fee9b6cfede..a69bdd877120 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -131,9 +131,22 @@ struct rcu_work {
  */
 struct workqueue_attrs {
 	/**
-	 * @nice: nice level
+	 * @policy: scheduling policy (SCHED_NORMAL, SCHED_FIFO, SCHED_RR)
 	 */
-	int nice;
+	int policy;
+
+	/**
+	 * @nice: nice level (SCHED_NORMAL)
+	 * @priority: priority (SCHED_FIFO, SCHED_RR)
+	 *
+	 * Letting these two fields occupy the same word simplifies
+	 * copying, hashing and equality testing of struct
+	 * workqueue_attrs.
+	 */
+	union {
+		int nice;
+		int priority;
+	};
 
 	/**
 	 * @cpumask: allowed CPUs
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33f1106b4f99..9eb2ff7bcc04 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -51,6 +51,7 @@
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
+#include <uapi/linux/sched/types.h>
 
 #include "workqueue_internal.h"

@@ -1969,7 +1970,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
-	set_user_nice(worker->task, pool->attrs->nice);
+	if (pool->attrs->policy == SCHED_NORMAL) {
+		set_user_nice(worker->task, pool->attrs->nice);
+	} else {
+		struct sched_param sp = { .sched_priority = pool->attrs->priority };
+
+		sched_setscheduler(worker->task, pool->attrs->policy, &sp);
+	}
 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
 
 	/* successful, attach the worker to the pool */
@@ -3402,6 +3409,12 @@ struct workqueue_attrs *alloc_workqueue_attrs(void)
 {
 	struct workqueue_attrs *attrs;
 
+	/*
+	 * A zeroed structure has ->policy==SCHED_NORMAL and
+	 * ->nice==0.
+	 */
+	static_assert(SCHED_NORMAL == 0);
+
 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		goto fail;
@@ -3418,6 +3431,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void)
 static void copy_workqueue_attrs(struct workqueue_attrs *to,
 				 const struct workqueue_attrs *from)
 {
+	to->policy = from->policy;
 	to->nice = from->nice;
 	cpumask_copy(to->cpumask, from->cpumask);
 	/*
@@ -3433,7 +3447,7 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
 {
 	u32 hash = 0;
 
-	hash = jhash_1word(attrs->nice, hash);
+	hash = jhash_2words(attrs->policy, attrs->nice, hash);
 	hash = jhash(cpumask_bits(attrs->cpumask),
 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
 	return hash;
@@ -3443,6 +3457,8 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
 static bool wqattrs_equal(const struct workqueue_attrs *a,
 			  const struct workqueue_attrs *b)
 {
+	if (a->policy != b->policy)
+		return false;
 	if (a->nice != b->nice)
 		return false;
 	if (!cpumask_equal(a->cpumask, b->cpumask))
--
2.31.1