From: Lai Jiangshan <[email protected]>
Some simple cleanups.
Lai Jiangshan (4):
workqueue: Use INIT_WORK_ONSTACK in workqueue_softirq_dead()
workqueue: Reorder the fields in struct workqueue_attrs
workqueue: Move attrs->cpumask out of worker_pool's properties when
attrs->affn_strict
workqueue: Use list_last_entry() to get the last idle worker
include/linux/workqueue.h | 27 +++++++++++++++------------
kernel/workqueue.c | 19 +++++++++++--------
2 files changed, 26 insertions(+), 20 deletions(-)
--
2.19.1.6.gb485710b
From: Lai Jiangshan <[email protected]>
dead_work is a stack variable.
Signed-off-by: Lai Jiangshan <[email protected]>
---
kernel/workqueue.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a60eb65955e7..3ff91243322d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3687,7 +3687,7 @@ void workqueue_softirq_dead(unsigned int cpu)
if (!need_more_worker(pool))
continue;
- INIT_WORK(&dead_work.work, drain_dead_softirq_workfn);
+ INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn);
dead_work.pool = pool;
init_completion(&dead_work.done);
--
2.19.1.6.gb485710b
From: Lai Jiangshan <[email protected]>
So that its size is reduced from 40 to 32 bytes in a 64-bit kernel, and
more memory can be saved when it is allocated with kmalloc() in alloc_workqueue_attrs().
Signed-off-by: Lai Jiangshan <[email protected]>
---
include/linux/workqueue.h | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 158784dd189a..c885a5f6bb93 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -145,13 +145,15 @@ struct workqueue_attrs {
int nice;
/**
- * @cpumask: allowed CPUs
+ * @affn_strict: affinity scope is strict
*
- * Work items in this workqueue are affine to these CPUs and not allowed
- * to execute on other CPUs. A pool serving a workqueue must have the
- * same @cpumask.
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
+ *
+ * If set, workers are only allowed to run inside @__pod_cpumask.
*/
- cpumask_var_t cpumask;
+ bool affn_strict;
/**
* @__pod_cpumask: internal attribute used to create per-pod pools
@@ -166,15 +168,13 @@ struct workqueue_attrs {
cpumask_var_t __pod_cpumask;
/**
- * @affn_strict: affinity scope is strict
- *
- * If clear, workqueue will make a best-effort attempt at starting the
- * worker inside @__pod_cpumask but the scheduler is free to migrate it
- * outside.
+ * @cpumask: allowed CPUs
*
- * If set, workers are only allowed to run inside @__pod_cpumask.
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
- bool affn_strict;
+ cpumask_var_t cpumask;
/*
* Below fields aren't properties of a worker_pool. They only modify how
--
2.19.1.6.gb485710b
From: Lai Jiangshan <[email protected]>
It is clearer than the open-coded version.
Signed-off-by: Lai Jiangshan <[email protected]>
---
kernel/workqueue.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3bb71cfd3e06..7a3392aab395 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2940,7 +2940,7 @@ static void idle_worker_timeout(struct timer_list *t)
unsigned long expires;
/* idle_list is kept in LIFO order, check the last one */
- worker = list_entry(pool->idle_list.prev, struct worker, entry);
+ worker = list_last_entry(&pool->idle_list, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
do_cull = !time_before(jiffies, expires);
@@ -2982,7 +2982,7 @@ static void idle_cull_fn(struct work_struct *work)
struct worker *worker;
unsigned long expires;
- worker = list_entry(pool->idle_list.prev, struct worker, entry);
+ worker = list_last_entry(&pool->idle_list, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires)) {
--
2.19.1.6.gb485710b
From: Lai Jiangshan <[email protected]>
Allow more pools to be shared when attrs->affn_strict is set.
Signed-off-by: Lai Jiangshan <[email protected]>
---
include/linux/workqueue.h | 3 +++
kernel/workqueue.c | 13 ++++++++-----
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index c885a5f6bb93..86f723579a32 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -180,6 +180,9 @@ struct workqueue_attrs {
* Below fields aren't properties of a worker_pool. They only modify how
* :c:func:`apply_workqueue_attrs` select pools and thus don't
* participate in pool hash calculations or equality comparisons.
+ *
+ * If @affn_strict is set, @cpumask isn't a property of a worker_pool
+ * either.
*/
/**
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3ff91243322d..3bb71cfd3e06 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4517,6 +4517,8 @@ static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
{
attrs->affn_scope = WQ_AFFN_NR_TYPES;
attrs->ordered = false;
+ if (attrs->affn_strict)
+ cpumask_copy(attrs->cpumask, cpu_possible_mask);
}
/* hash value of the content of @attr */
@@ -4525,11 +4527,12 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
u32 hash = 0;
hash = jhash_1word(attrs->nice, hash);
- hash = jhash(cpumask_bits(attrs->cpumask),
- BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+ hash = jhash_1word(attrs->affn_strict, hash);
hash = jhash(cpumask_bits(attrs->__pod_cpumask),
BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
- hash = jhash_1word(attrs->affn_strict, hash);
+ if (!attrs->affn_strict)
+ hash = jhash(cpumask_bits(attrs->cpumask),
+ BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
return hash;
}
@@ -4539,11 +4542,11 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
{
if (a->nice != b->nice)
return false;
- if (!cpumask_equal(a->cpumask, b->cpumask))
+ if (a->affn_strict != b->affn_strict)
return false;
if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
return false;
- if (a->affn_strict != b->affn_strict)
+ if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask))
return false;
return true;
}
--
2.19.1.6.gb485710b
On Fri, Mar 08, 2024 at 05:42:50PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <[email protected]>
>
> dead_work is a stack variable.
>
> Signed-off-by: Lai Jiangshan <[email protected]>
Applied to wq/for-6.10.
Thanks.
--
tejun
Hello,
On Fri, Mar 08, 2024 at 05:42:51PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <[email protected]>
>
> So that its size is reduced from 40 to 32 in 64bit kernel, and it can be
> saved more when allocated with kmalloc() in alloc_workqueue_attrs().
Does this matter tho? Also, the sizing would be dependent on the number of
supported CPUs, right?
So, before, the order was - nice, cpumasks, affinity scope related stuff,
and then ordered flag. After, the cpumasks and affinity scope stuff are
mixed. Unless the saving is actually meaningful, I'd rather keep the current
ordering.
Thanks.
--
tejun
On Fri, Mar 08, 2024 at 05:42:53PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <[email protected]>
>
> It is clearer than open code.
>
> Signed-off-by: Lai Jiangshan <[email protected]>
Applied to wq/for-6.10.
Thanks.
--
tejun
On Fri, Mar 08, 2024 at 05:42:52PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <[email protected]>
>
> Allow more pools can be shared when attrs->affn_strict.
>
> Signed-off-by: Lai Jiangshan <[email protected]>
Applied to wq/for-6.10.
Thanks.
--
tejun