2023-12-08 22:06:23

by Frederic Weisbecker

Subject: [PATCH 0/8] rcu: Fix expedited GP deadlock (and cleanup some nocb stuff)

TREE04 can trigger a writer stall if run under memory pressure. This
is due to a circular dependency between waiting for an expedited grace
period and polling on an expedited grace period when workqueues fall
back to mayday serialization.

Here is a proposed fix.

Frederic Weisbecker (8):
rcu/nocb: Make IRQs disablement symmetric
rcu/nocb: Re-arrange call_rcu() NOCB specific code
rcu/exp: Fix RCU expedited parallel grace period kworker allocation
failure recovery
rcu/exp: Handle RCU expedited grace period kworker allocation failure
rcu: s/boost_kthread_mutex/kthread_mutex
rcu/exp: Make parallel exp gp kworker per rcu node
rcu/exp: Handle parallel exp gp kworkers affinity
rcu/exp: Remove rcu_par_gp_wq

kernel/rcu/rcu.h | 5 -
kernel/rcu/tree.c | 222 +++++++++++++++++++++++++--------------
kernel/rcu/tree.h | 12 +--
kernel/rcu/tree_exp.h | 81 +++-----------
kernel/rcu/tree_nocb.h | 38 ++++---
kernel/rcu/tree_plugin.h | 52 ++-------
6 files changed, 191 insertions(+), 219 deletions(-)

--
2.42.1


2023-12-08 22:06:40

by Frederic Weisbecker

Subject: [PATCH 4/8] rcu/exp: Handle RCU expedited grace period kworker allocation failure

Just as is done for the kworker performing the nodes initialization,
gracefully handle the possible allocation failure of the RCU expedited
grace period main kworker.

While at it, rename the related checking functions to better reflect
the expedited specifics.
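
For illustration only, here is a self-contained userspace sketch (all
names below are stand-ins, not the kernel code) of the "can_queue"
gating this patch adds to synchronize_rcu_expedited(): queue to the
kworker only when the scheduler is past early init and the worker was
actually created, otherwise fall back to a direct synchronous call.

#include <stdbool.h>
#include <stdio.h>

enum sched_state { SCHED_INIT, SCHED_RUNNING };

static enum sched_state scheduler_state = SCHED_INIT;
static void *exp_gp_worker;	/* stays NULL if the allocation failed */

static bool exp_worker_started(void)
{
	return exp_gp_worker != NULL;
}

static void synchronize_expedited_sketch(void)
{
	bool can_queue = scheduler_state != SCHED_INIT && exp_worker_started();

	if (!can_queue)
		puts("direct call: select CPUs, wait and wake synchronously");
	else
		puts("queued to the expedited GP kworker");
}

int main(void)
{
	synchronize_expedited_sketch();		/* early boot: direct call */
	scheduler_state = SCHED_RUNNING;
	synchronize_expedited_sketch();		/* worker missing: still direct */
	exp_gp_worker = (void *)0x1;		/* pretend creation succeeded */
	synchronize_expedited_sketch();		/* normal case: queue */
	return 0;
}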

Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker")
Cc: Kalesh Singh <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 2 ++
kernel/rcu/tree_exp.h | 25 +++++++++++++++++++------
2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 055f4817bc70..39679cf66c3a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4717,6 +4717,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
pr_err("Failed to create %s!\n", gp_kworker_name);
+ rcu_exp_gp_kworker = NULL;
return;
}

@@ -4725,6 +4726,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
pr_err("Failed to create %s!\n", par_gp_kworker_name);
rcu_exp_par_gp_kworker = NULL;
kthread_destroy_worker(rcu_exp_gp_kworker);
+ rcu_exp_gp_kworker = NULL;
return;
}

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d7cea5d591f..cb31f4fb4b36 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -429,7 +429,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
__sync_rcu_exp_select_node_cpus(rewp);
}

-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+ return !!READ_ONCE(rcu_exp_gp_kworker);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
{
return !!READ_ONCE(rcu_exp_par_gp_kworker);
}
@@ -479,7 +484,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
__sync_rcu_exp_select_node_cpus(rewp);
}

-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+ return !!READ_ONCE(rcu_gp_wq);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
{
return !!READ_ONCE(rcu_par_gp_wq);
}
@@ -542,7 +552,7 @@ static void sync_rcu_exp_select_cpus(void)
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
- if (!rcu_gp_par_worker_started() ||
+ if (!rcu_exp_par_worker_started() ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
rcu_is_last_leaf_node(rnp)) {
/* No worker started yet or last leaf, do direct call. */
@@ -957,7 +967,7 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
*/
void synchronize_rcu_expedited(void)
{
- bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+ bool can_queue;
unsigned long flags;
struct rcu_exp_work rew;
struct rcu_node *rnp;
@@ -968,6 +978,9 @@ void synchronize_rcu_expedited(void)
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu_expedited() in RCU read-side critical section");

+ can_queue = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
+ rcu_exp_worker_started();
+
/* Is the state is such that the call is a grace period? */
if (rcu_blocking_is_gp()) {
// Note well that this code runs with !PREEMPT && !SMP.
@@ -997,7 +1010,7 @@ void synchronize_rcu_expedited(void)
return; /* Someone else did our work for us. */

/* Ensure that load happens before action based on it. */
- if (unlikely(boottime)) {
+ if (unlikely(!can_queue)) {
/* Direct call during scheduler init and early_initcalls(). */
rcu_exp_sel_wait_wake(s);
} else {
@@ -1015,7 +1028,7 @@ void synchronize_rcu_expedited(void)
/* Let the next expedited grace period start. */
mutex_unlock(&rcu_state.exp_mutex);

- if (likely(!boottime))
+ if (likely(can_queue))
synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
--
2.42.1

2023-12-08 22:06:43

by Frederic Weisbecker

Subject: [PATCH 2/8] rcu/nocb: Re-arrange call_rcu() NOCB specific code

Currently the call_rcu() function interleaves NOCB and !NOCB enqueue
code in a complicated way such that:

* The bypass enqueue code may or may not have enqueued and may or may
not have locked the ->nocb_lock. Everything that follows is in a
Schrödinger locking state for the unwary reviewer's eyes.

* The was_alldone is always set but only used in NOCB related code.

* The NOCB wake up is distantly related to the locking hopefully
performed by the bypass enqueue code that did not enqueue on the
bypass list.

Untangle the whole thing and gather the NOCB and !NOCB specific enqueue
code into their own functions.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 44 +++++++++++++++++++-----------------------
kernel/rcu/tree.h | 5 ++---
kernel/rcu/tree_nocb.h | 18 ++++++++++++++---
3 files changed, 37 insertions(+), 30 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 74159c6d3bdf..82f8130d3fe3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2559,12 +2559,26 @@ static int __init rcu_spawn_core_kthreads(void)
return 0;
}

+static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
+{
+ rcu_segcblist_enqueue(&rdp->cblist, head);
+ if (__is_kvfree_rcu_offset((unsigned long)func))
+ trace_rcu_kvfree_callback(rcu_state.name, head,
+ (unsigned long)func,
+ rcu_segcblist_n_cbs(&rdp->cblist));
+ else
+ trace_rcu_callback(rcu_state.name, head,
+ rcu_segcblist_n_cbs(&rdp->cblist));
+ trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
+}
+
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
-static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
- unsigned long flags)
+static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
+ rcu_callback_t func, unsigned long flags)
{
+ rcutree_enqueue(rdp, head, func);
/*
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
@@ -2660,7 +2674,6 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
unsigned long flags;
bool lazy;
struct rcu_data *rdp;
- bool was_alldone;

/* Misaligned rcu_head! */
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2697,28 +2710,11 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
}

check_cb_ovld(rdp);
- if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
- local_irq_restore(flags);
- return; // Enqueued onto ->nocb_bypass, so just leave.
- }
- // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
- rcu_segcblist_enqueue(&rdp->cblist, head);
- if (__is_kvfree_rcu_offset((unsigned long)func))
- trace_rcu_kvfree_callback(rcu_state.name, head,
- (unsigned long)func,
- rcu_segcblist_n_cbs(&rdp->cblist));
+
+ if (unlikely(rcu_rdp_is_offloaded(rdp)))
+ call_rcu_nocb(rdp, head, func, flags, lazy);
else
- trace_rcu_callback(rcu_state.name, head,
- rcu_segcblist_n_cbs(&rdp->cblist));
-
- trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
-
- /* Go handle any RCU core processing required. */
- if (unlikely(rcu_rdp_is_offloaded(rdp))) {
- __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
- } else {
- __call_rcu_core(rdp, head, flags);
- }
+ call_rcu_core(rdp, head, func, flags);
local_irq_restore(flags);
}

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index e9821a8422db..e0e70b663cbf 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -467,9 +467,8 @@ static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j, bool lazy);
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
- bool *was_alldone, unsigned long flags,
- bool lazy);
+static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
+ rcu_callback_t func, unsigned long flags, bool lazy);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3f70fd0a2db4..868063621c2f 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -622,6 +622,18 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
}
}

+static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
+ rcu_callback_t func, unsigned long flags, bool lazy)
+{
+ bool was_alldone;
+
+ if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
+ /* Not enqueued on bypass but locked, do regular enqueue */
+ rcutree_enqueue(rdp, head, func);
+ __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
+ }
+}
+
static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
bool *wake_state)
{
@@ -1765,10 +1777,10 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
return true;
}

-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
- bool *was_alldone, unsigned long flags, bool lazy)
+static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
+ rcu_callback_t func, unsigned long flags, bool lazy)
{
- return false;
+ WARN_ON_ONCE(1); /* Should be dead code! */
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
--
2.42.1

2023-12-08 22:06:43

by Frederic Weisbecker

Subject: [PATCH 1/8] rcu/nocb: Make IRQs disablement symmetric

Currently IRQs are disabled on call_rcu() and then depending on the
context:

* If the CPU is in nocb mode:

- If the callback is enqueued in the bypass list, IRQs are re-enabled
implicitly by rcu_nocb_try_bypass()

- If the callback is enqueued in the normal list, IRQs are re-enabled
implicitly by __call_rcu_nocb_wake()

* If the CPU is NOT in nocb mode, IRQs are re-enabled explicitly from call_rcu()

This makes the code a bit hard to follow, especially as it interleaves
with nocb locking.

To make the IRQ flags coverage clearer and also in order to prepare for
moving all the nocb enqueue code to its own function, always re-enable
the IRQ flags explicitly from call_rcu().
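
As a purely illustrative userspace sketch (mock helpers, not the kernel
code), this is the shape the patch aims for: the function that disables
IRQs is also the one re-enabling them, so the flags coverage is visible
in a single place instead of being hidden inside the callees.

#include <stdio.h>

static void irqs_off(void) { puts("irqs off"); }
static void irqs_on(void)  { puts("irqs on"); }

/* After this patch, neither callee touches the IRQ flags anymore. */
static void nocb_enqueue(void)   { puts("nocb enqueue, irqs stay off"); }
static void normal_enqueue(void) { puts("normal enqueue, irqs stay off"); }

static void call_rcu_sketch(int offloaded)
{
	irqs_off();
	if (offloaded)
		nocb_enqueue();
	else
		normal_enqueue();
	irqs_on();	/* single, symmetric restore point */
}

int main(void)
{
	call_rcu_sketch(0);
	call_rcu_sketch(1);
	return 0;
}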

Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 9 ++++++---
kernel/rcu/tree_nocb.h | 20 +++++++++-----------
2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 52c2fdbc6363..74159c6d3bdf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2697,8 +2697,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
}

check_cb_ovld(rdp);
- if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
+ if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
+ local_irq_restore(flags);
return; // Enqueued onto ->nocb_bypass, so just leave.
+ }
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
rcu_segcblist_enqueue(&rdp->cblist, head);
if (__is_kvfree_rcu_offset((unsigned long)func))
@@ -2716,8 +2718,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
} else {
__call_rcu_core(rdp, head, flags);
- local_irq_restore(flags);
}
+ local_irq_restore(flags);
}

#ifdef CONFIG_RCU_LAZY
@@ -4615,8 +4617,9 @@ void rcutree_migrate_callbacks(int cpu)
__call_rcu_nocb_wake(my_rdp, true, flags);
} else {
rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
- raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
+ raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
}
+ local_irq_restore(flags);
if (needwake)
rcu_gp_kthread_wake();
lockdep_assert_irqs_enabled();
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 5598212d1f27..3f70fd0a2db4 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -532,9 +532,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
// 2. Both of these conditions are met:
// a. The bypass list previously had only lazy CBs, and:
// b. The new CB is non-lazy.
- if (ncbs && (!bypass_is_lazy || lazy)) {
- local_irq_restore(flags);
- } else {
+ if (!ncbs || (bypass_is_lazy && !lazy)) {
// No-CBs GP kthread might be indefinitely asleep, if so, wake.
rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
@@ -544,7 +542,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
} else {
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("FirstBQnoWake"));
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
}
}
return true; // Callback already enqueued.
@@ -570,7 +568,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
// If we are being polled or there is no kthread, just leave.
t = READ_ONCE(rdp->nocb_gp_kthread);
if (rcu_nocb_poll || !t) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeNotPoll"));
return;
@@ -583,17 +581,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
rdp->qlen_last_fqs_check = len;
// Only lazy CBs in bypass list
if (lazy_len && bypass_len == lazy_len) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
TPS("WakeLazy"));
} else if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
wake_nocb_gp(rdp, false);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeEmpty"));
} else {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
TPS("WakeEmptyIsDeferred"));
}
@@ -611,15 +609,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
if ((rdp->nocb_cb_sleep ||
!rcu_segcblist_ready_cbs(&rdp->cblist)) &&
!timer_pending(&rdp->nocb_timer)) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
TPS("WakeOvfIsDeferred"));
} else {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
}
} else {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ rcu_nocb_unlock(rdp);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
}
}
--
2.42.1

2023-12-08 22:06:45

by Frederic Weisbecker

Subject: [PATCH 3/8] rcu/exp: Fix RCU expedited parallel grace period kworker allocation failure recovery

Under CONFIG_RCU_EXP_KTHREAD=y, the nodes initialization for expedited
grace periods is queued to a kworker. However, if the allocation of that
kworker failed, the nodes initialization is performed synchronously by
the caller instead.

Now the check for kworker initialization failure relies on the kworker
pointer being NULL, while its value might actually be an error pointer
encoding the allocation failure.

Make sure to handle this case.
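
To illustrate the pitfall outside of the kernel, here is a self-contained
userspace sketch with mock ERR_PTR() helpers (the names and values mimic
the kernel ones but this is not kernel code): on failure the creation
function returns an error-encoding pointer rather than NULL, so a later
NULL check wrongly concludes that the worker exists unless the pointer
is explicitly reset on the failure path.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR_OR_NULL(p) \
	((p) == NULL || (uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

static void *create_worker_failing(void)
{
	return ERR_PTR(-ENOMEM);	/* what a failed creation returns */
}

int main(void)
{
	void *worker = create_worker_failing();

	if (IS_ERR_OR_NULL(worker)) {
		printf("creation failed\n");
		/* Without this reset, the check below is fooled: */
		/* worker = NULL; */
	}

	if (worker)	/* the "worker started" check used elsewhere */
		printf("BUG: error pointer mistaken for a live worker\n");
	else
		printf("failure correctly detected\n");

	return 0;
}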

Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker")
Cc: Kalesh Singh <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 82f8130d3fe3..055f4817bc70 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4723,6 +4723,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
pr_err("Failed to create %s!\n", par_gp_kworker_name);
+ rcu_exp_par_gp_kworker = NULL;
kthread_destroy_worker(rcu_exp_gp_kworker);
return;
}
--
2.42.1

2023-12-08 22:06:55

by Frederic Weisbecker

Subject: [PATCH 6/8] rcu/exp: Make parallel exp gp kworker per rcu node

When CONFIG_RCU_EXP_KTHREAD=n, the expedited grace period per-node
initialization is performed in parallel via workqueues (one work per
node).

However when CONFIG_RCU_EXP_KTHREAD=y, this per-node initialization is
performed by a single kworker serializing each node initialization (one
work for all nodes).

The latter is certainly less scalable and less efficient beyond a single
leaf node.

To improve this, expand this single kworker into per-node kworkers. This
new layout is eventually intended to remove the workqueue-based
implementation since it will essentially now become duplicate code.
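
As a layout sketch only (stand-in types and names, no actual kworker
involved), the per-node arrangement looks like this: each node carries
its own worker and queues its own initialization there, falling back to
a direct call when the worker does not exist yet.

#include <stdio.h>

struct worker { const char *name; };

struct node {
	int grplo, grphi;
	struct worker *exp_kworker;	/* per-node, may be NULL early */
};

static struct worker workers[2] = {
	{ "rcu_exp_par_gp_kthread_worker/0" },
	{ "rcu_exp_par_gp_kthread_worker/1" },
};

static struct node nodes[2] = {
	{ .grplo = 0, .grphi = 3, .exp_kworker = &workers[0] },
	{ .grplo = 4, .grphi = 7, .exp_kworker = NULL },
};

static void queue_node_init(struct node *n)
{
	if (!n->exp_kworker) {
		printf("node %d-%d: no worker yet, direct call\n",
		       n->grplo, n->grphi);
		return;
	}
	printf("node %d-%d: queued on %s\n",
	       n->grplo, n->grphi, n->exp_kworker->name);
}

int main(void)
{
	for (int i = 0; i < 2; i++)
		queue_node_init(&nodes[i]);
	return 0;
}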

Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/rcu.h | 1 -
kernel/rcu/tree.c | 123 +++++++++++++++++++++++----------------
kernel/rcu/tree.h | 1 +
kernel/rcu/tree_exp.h | 10 ++--
kernel/rcu/tree_plugin.h | 10 +---
5 files changed, 81 insertions(+), 64 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index f94f65877f2b..6beaf70d629f 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -625,7 +625,6 @@ void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
-extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 996efaded5bf..060d418c2b44 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4367,6 +4367,75 @@ rcu_boot_init_percpu_data(int cpu)
rcu_boot_init_nocb_percpu_data(rdp);
}

+#ifdef CONFIG_RCU_EXP_KTHREAD
+struct kthread_worker *rcu_exp_gp_kworker;
+
+static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
+{
+ struct kthread_worker *kworker;
+ const char *name = "rcu_exp_par_gp_kthread_worker/%d";
+ struct sched_param param = { .sched_priority = kthread_prio };
+ int rnp_index = rnp - rcu_get_root();
+
+ if (rnp->exp_kworker)
+ return;
+
+ kworker = kthread_create_worker(0, name, rnp_index);
+ if (IS_ERR_OR_NULL(kworker)) {
+ pr_err("Failed to create par gp kworker on %d/%d\n",
+ rnp->grplo, rnp->grphi);
+ return;
+ }
+ WRITE_ONCE(rnp->exp_kworker, kworker);
+ sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
+}
+
+static void __init rcu_start_exp_gp_kworker(void)
+{
+ const char *name = "rcu_exp_gp_kthread_worker";
+ struct sched_param param = { .sched_priority = kthread_prio };
+
+ rcu_exp_gp_kworker = kthread_create_worker(0, name);
+ if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
+ pr_err("Failed to create %s!\n", name);
+ rcu_exp_gp_kworker = NULL;
+ return;
+ }
+ sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
+}
+
+static inline void rcu_alloc_par_gp_wq(void)
+{
+}
+#else /* !CONFIG_RCU_EXP_KTHREAD */
+struct workqueue_struct *rcu_par_gp_wq;
+
+static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
+{
+}
+
+static void __init rcu_start_exp_gp_kworker(void)
+{
+}
+
+static inline void rcu_alloc_par_gp_wq(void)
+{
+ rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
+ WARN_ON(!rcu_par_gp_wq);
+}
+#endif /* CONFIG_RCU_EXP_KTHREAD */
+
+static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
+{
+ if ((IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) ||
+ IS_ENABLED(CONFIG_RCU_BOOST)) && rcu_scheduler_fully_active) {
+ mutex_lock(&rnp->kthread_mutex);
+ rcu_spawn_one_boost_kthread(rnp);
+ rcu_spawn_exp_par_gp_kworker(rnp);
+ mutex_unlock(&rnp->kthread_mutex);
+ }
+}
+
/*
* Invoked early in the CPU-online process, when pretty much all services
* are available. The incoming CPU is not present.
@@ -4415,7 +4484,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- rcu_spawn_one_boost_kthread(rnp);
+ rcu_spawn_rnp_kthreads(rnp);
rcu_spawn_cpu_nocb_kthread(cpu);
WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);

@@ -4704,54 +4773,6 @@ static int rcu_pm_notify(struct notifier_block *self,
return NOTIFY_OK;
}

-#ifdef CONFIG_RCU_EXP_KTHREAD
-struct kthread_worker *rcu_exp_gp_kworker;
-struct kthread_worker *rcu_exp_par_gp_kworker;
-
-static void __init rcu_start_exp_gp_kworkers(void)
-{
- const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
- const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
- struct sched_param param = { .sched_priority = kthread_prio };
-
- rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
- if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
- pr_err("Failed to create %s!\n", gp_kworker_name);
- rcu_exp_gp_kworker = NULL;
- return;
- }
-
- rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
- if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
- pr_err("Failed to create %s!\n", par_gp_kworker_name);
- rcu_exp_par_gp_kworker = NULL;
- kthread_destroy_worker(rcu_exp_gp_kworker);
- rcu_exp_gp_kworker = NULL;
- return;
- }
-
- sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
- sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
- &param);
-}
-
-static inline void rcu_alloc_par_gp_wq(void)
-{
-}
-#else /* !CONFIG_RCU_EXP_KTHREAD */
-struct workqueue_struct *rcu_par_gp_wq;
-
-static void __init rcu_start_exp_gp_kworkers(void)
-{
-}
-
-static inline void rcu_alloc_par_gp_wq(void)
-{
- rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
- WARN_ON(!rcu_par_gp_wq);
-}
-#endif /* CONFIG_RCU_EXP_KTHREAD */
-
/*
* Spawn the kthreads that handle RCU's grace periods.
*/
@@ -4786,10 +4807,10 @@ static int __init rcu_spawn_gp_kthread(void)
* due to rcu_scheduler_fully_active.
*/
rcu_spawn_cpu_nocb_kthread(smp_processor_id());
- rcu_spawn_one_boost_kthread(rdp->mynode);
+ rcu_spawn_rnp_kthreads(rdp->mynode);
rcu_spawn_core_kthreads();
/* Create kthread worker for expedited GPs */
- rcu_start_exp_gp_kworkers();
+ rcu_start_exp_gp_kworker();
return 0;
}
early_initcall(rcu_spawn_gp_kthread);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index f98a245e5f32..ef3d3385063f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -72,6 +72,7 @@ struct rcu_node {
/* Online CPUs for next expedited GP. */
/* Any CPU that has ever been online will */
/* have its bit set. */
+ struct kthread_worker *exp_kworker;
unsigned long cbovldmask;
/* CPUs experiencing callback overload. */
unsigned long ffmask; /* Fully functional CPUs. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index cb31f4fb4b36..744d6acf5553 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -434,9 +434,9 @@ static inline bool rcu_exp_worker_started(void)
return !!READ_ONCE(rcu_exp_gp_kworker);
}

-static inline bool rcu_exp_par_worker_started(void)
+static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
{
- return !!READ_ONCE(rcu_exp_par_gp_kworker);
+ return !!READ_ONCE(rnp->exp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
@@ -447,7 +447,7 @@ static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
* another work item on the same kthread worker can result in
* deadlock.
*/
- kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
+ kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
@@ -489,7 +489,7 @@ static inline bool rcu_exp_worker_started(void)
return !!READ_ONCE(rcu_gp_wq);
}

-static inline bool rcu_exp_par_worker_started(void)
+static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
{
return !!READ_ONCE(rcu_par_gp_wq);
}
@@ -552,7 +552,7 @@ static void sync_rcu_exp_select_cpus(void)
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
- if (!rcu_exp_par_worker_started() ||
+ if (!rcu_exp_par_worker_started(rnp) ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
rcu_is_last_leaf_node(rnp)) {
/* No worker started yet or last leaf, do direct call. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d307674915c..09bdd36ca9ff 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1195,14 +1195,13 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
struct sched_param sp;
struct task_struct *t;

- mutex_lock(&rnp->kthread_mutex);
- if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
- goto out;
+ if (rnp->boost_kthread_task)
+ return;

t = kthread_create(rcu_boost_kthread, (void *)rnp,
"rcub/%d", rnp_index);
if (WARN_ON_ONCE(IS_ERR(t)))
- goto out;
+ return;

raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp->boost_kthread_task = t;
@@ -1210,9 +1209,6 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-
- out:
- mutex_unlock(&rnp->kthread_mutex);
}

/*
--
2.42.1

2023-12-08 22:06:58

by Frederic Weisbecker

Subject: [PATCH 8/8] rcu/exp: Remove rcu_par_gp_wq

TREE04 running on short iterations can produce writer stalls of the
following kind:

??? Writer stall state RTWS_EXP_SYNC(4) g3968 f0x0 ->state 0x2 cpu 0
task:rcu_torture_wri state:D stack:14568 pid:83 ppid:2 flags:0x00004000
Call Trace:
<TASK>
__schedule+0x2de/0x850
? trace_event_raw_event_rcu_exp_funnel_lock+0x6d/0xb0
schedule+0x4f/0x90
synchronize_rcu_expedited+0x430/0x670
? __pfx_autoremove_wake_function+0x10/0x10
? __pfx_synchronize_rcu_expedited+0x10/0x10
do_rtws_sync.constprop.0+0xde/0x230
rcu_torture_writer+0x4b4/0xcd0
? __pfx_rcu_torture_writer+0x10/0x10
kthread+0xc7/0xf0
? __pfx_kthread+0x10/0x10
ret_from_fork+0x2f/0x50
? __pfx_kthread+0x10/0x10
ret_from_fork_asm+0x1b/0x30
</TASK>

Waiting for an expedited grace period and polling for an expedited
grace period are both operations that internally rely on the same
workqueue to perform the necessary asynchronous work.

However, a dependency chain is involved between those two operations,
as depicted below:

====== CPU 0 =======                          ====== CPU 1 =======

                                              synchronize_rcu_expedited()
                                                  exp_funnel_lock()
                                                      mutex_lock(&rcu_state.exp_mutex);
start_poll_synchronize_rcu_expedited
    queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
                                              synchronize_rcu_expedited_queue_work()
                                                  queue_work(rcu_gp_wq, &rew->rew_work);
                                              wait_event() // A, wait for &rew->rew_work completion
                                              mutex_unlock() // B
//======> switch to kworker

sync_rcu_do_polled_gp() {
    synchronize_rcu_expedited()
        exp_funnel_lock()
            mutex_lock(&rcu_state.exp_mutex); // C, wait B
            ....
} // D

Since workqueues are usually implemented on top of several kworkers
handling the queue concurrently, the above situation wouldn't deadlock
most of the time because A then doesn't depend on D. But in case of
memory stress, a single kworker may end up handling all the works
alone and in a serialized way. In that case the above layout becomes
a problem because A then waits for D, closing a circular dependency:

A -> D -> C -> B -> A
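
The cycle can be made explicit with a tiny standalone sketch that just
walks the "waits for" edges listed above (pure illustration, no RCU or
workqueue code involved): once the single kworker makes A wait for D,
every event ends up transitively waiting for itself.

#include <stdio.h>

enum ev { A, B, C, D, NEV };

static const char *name[NEV] = {
	"A (wait for rew_work)", "B (mutex_unlock)",
	"C (mutex_lock)", "D (polled GP work done)",
};

/* waits_for[x] = y means event x cannot happen before event y */
static const int waits_for[NEV] = { [A] = D, [D] = C, [C] = B, [B] = A };

int main(void)
{
	int seen[NEV] = { 0 };
	int e = A;

	while (!seen[e]) {
		seen[e] = 1;
		printf("%s -> ", name[e]);
		e = waits_for[e];
	}
	printf("%s: cycle, hence the deadlock\n", name[e]);
	return 0;
}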

This however only happens when CONFIG_RCU_EXP_KTHREAD=n. Indeed
synchronize_rcu_expedited() is otherwise implemented on top of a kthread
worker while polling still relies on rcu_gp_wq workqueue, breaking the
above circular dependency chain.

Fix this by making the expedited grace period always rely on kthread
workers. The workqueue-based implementation is essentially a duplicate
anyway now that the per-node initialization is performed by per-node
kthread workers.

Meanwhile the CONFIG_RCU_EXP_KTHREAD switch is still kept around to
manage the scheduler policy of these kthread workers.

Reported-by: Anna-Maria Behnsen <[email protected]>
Reported-by: Thomas Gleixner <[email protected]>
Suggested-by: Joel Fernandes <[email protected]>
Suggested-by: Paul E. McKenney <[email protected]>
Suggested-by: Neeraj Upadhyay <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/rcu.h | 4 ---
kernel/rcu/tree.c | 40 ++++---------------------
kernel/rcu/tree.h | 4 ---
kernel/rcu/tree_exp.h | 70 +------------------------------------------
4 files changed, 7 insertions(+), 111 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 6beaf70d629f..99032b9cb667 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -623,11 +623,7 @@ int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
-#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
-#else /* !CONFIG_RCU_EXP_KTHREAD */
-extern struct workqueue_struct *rcu_par_gp_wq;
-#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e75ddf42e9b1..0c28adb56ad4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4367,7 +4367,6 @@ rcu_boot_init_percpu_data(int cpu)
rcu_boot_init_nocb_percpu_data(rdp);
}

-#ifdef CONFIG_RCU_EXP_KTHREAD
struct kthread_worker *rcu_exp_gp_kworker;

static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
@@ -4387,7 +4386,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
return;
}
WRITE_ONCE(rnp->exp_kworker, kworker);
- sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
+
+ if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
+ sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
}

static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
@@ -4411,39 +4412,14 @@ static void __init rcu_start_exp_gp_kworker(void)
rcu_exp_gp_kworker = NULL;
return;
}
- sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
-}
-
-static inline void rcu_alloc_par_gp_wq(void)
-{
-}
-#else /* !CONFIG_RCU_EXP_KTHREAD */
-struct workqueue_struct *rcu_par_gp_wq;
-
-static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
-{
-}
-
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
- return NULL;
-}
-
-static void __init rcu_start_exp_gp_kworker(void)
-{
-}

-static inline void rcu_alloc_par_gp_wq(void)
-{
- rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
- WARN_ON(!rcu_par_gp_wq);
+ if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
+ sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
}
-#endif /* CONFIG_RCU_EXP_KTHREAD */

static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
{
- if ((IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) ||
- IS_ENABLED(CONFIG_RCU_BOOST)) && rcu_scheduler_fully_active) {
+ if (rcu_scheduler_fully_active) {
mutex_lock(&rnp->kthread_mutex);
rcu_spawn_one_boost_kthread(rnp);
rcu_spawn_exp_par_gp_kworker(rnp);
@@ -4527,9 +4503,6 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
struct rcu_node *rnp;
struct task_struct *task_boost, *task_exp;

- if (!IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) && !IS_ENABLED(CONFIG_RCU_BOOST))
- return;
-
rdp = per_cpu_ptr(&rcu_data, cpu);
rnp = rdp->mynode;

@@ -5209,7 +5182,6 @@ void __init rcu_init(void)
/* Create workqueue for Tree SRCU and for expedited GPs. */
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
WARN_ON(!rcu_gp_wq);
- rcu_alloc_par_gp_wq();

/* Fill in default value for rcutree.qovld boot parameter. */
/* -After- the rcu_node ->lock fields are initialized! */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index ef3d3385063f..35f7af331e6c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -24,11 +24,7 @@
/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
unsigned long rew_s;
-#ifdef CONFIG_RCU_EXP_KTHREAD
struct kthread_work rew_work;
-#else
- struct work_struct rew_work;
-#endif /* CONFIG_RCU_EXP_KTHREAD */
};

/* RCU's kthread states for tracing. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 744d6acf5553..dd33948ab80f 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -420,7 +420,6 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)

static void rcu_exp_sel_wait_wake(unsigned long s);

-#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
struct rcu_exp_work *rewp =
@@ -472,69 +471,6 @@ static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew
kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

-static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
-{
-}
-#else /* !CONFIG_RCU_EXP_KTHREAD */
-static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
-{
- struct rcu_exp_work *rewp =
- container_of(wp, struct rcu_exp_work, rew_work);
-
- __sync_rcu_exp_select_node_cpus(rewp);
-}
-
-static inline bool rcu_exp_worker_started(void)
-{
- return !!READ_ONCE(rcu_gp_wq);
-}
-
-static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
-{
- return !!READ_ONCE(rcu_par_gp_wq);
-}
-
-static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
-{
- int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
-
- INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- /* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi - rnp->grplo))
- cpu = WORK_CPU_UNBOUND;
- else
- cpu += rnp->grplo;
- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
-}
-
-static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
-{
- flush_work(&rnp->rew.rew_work);
-}
-
-/*
- * Work-queue handler to drive an expedited grace period forward.
- */
-static void wait_rcu_exp_gp(struct work_struct *wp)
-{
- struct rcu_exp_work *rewp;
-
- rewp = container_of(wp, struct rcu_exp_work, rew_work);
- rcu_exp_sel_wait_wake(rewp->rew_s);
-}
-
-static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
-{
- INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
- queue_work(rcu_gp_wq, &rew->rew_work);
-}
-
-static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
-{
- destroy_work_on_stack(&rew->rew_work);
-}
-#endif /* CONFIG_RCU_EXP_KTHREAD */
-
/*
* Select the nodes that the upcoming expedited grace period needs
* to wait for.
@@ -978,8 +914,7 @@ void synchronize_rcu_expedited(void)
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu_expedited() in RCU read-side critical section");

- can_queue = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
- rcu_exp_worker_started();
+ can_queue = (rcu_scheduler_active != RCU_SCHEDULER_INIT) && rcu_exp_worker_started();

/* Is the state is such that the call is a grace period? */
if (rcu_blocking_is_gp()) {
@@ -1027,9 +962,6 @@ void synchronize_rcu_expedited(void)

/* Let the next expedited grace period start. */
mutex_unlock(&rcu_state.exp_mutex);
-
- if (likely(can_queue))
- synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

--
2.42.1

2023-12-08 22:06:59

by Frederic Weisbecker

Subject: [PATCH 7/8] rcu/exp: Handle parallel exp gp kworkers affinity

Affine the parallel expedited gp kworkers to their respective RCU node
in order to make them close to the cache they are playing with.

This reuses the boost kthreads machinery that probes into CPU hotplug
operations such that the kthreads become/stay affine to their respective
node as soon/long as it contains online CPUs. Otherwise, and if the
CPU going down was the last one online on the leaf node, the related
kthread is affined to the housekeeping CPUs.
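
For reference, a userspace sketch of the affinity computed on hotplug
(plain bitmasks instead of cpumask_t, stand-in names, illustrative
only): the node's online CPUs minus the outgoing one, intersected with
the housekeeping set, with a fallback to the housekeeping CPUs if that
intersection ends up empty.

#include <stdio.h>

static unsigned long node_affinity(unsigned long node_online,
				   unsigned long housekeeping, int outgoing)
{
	unsigned long cm = node_online;

	if (outgoing >= 0)
		cm &= ~(1UL << outgoing);
	cm &= housekeeping;
	if (!cm) {			/* nothing left: fall back */
		cm = housekeeping;
		if (outgoing >= 0)
			cm &= ~(1UL << outgoing);
	}
	return cm;
}

int main(void)
{
	/* node serves CPUs 0-3, housekeeping covers 0-7, CPU 2 goes down */
	printf("affinity: %#lx\n", node_affinity(0xfUL, 0xffUL, 2));
	/* last online CPU of the node goes down: housekeeping fallback */
	printf("affinity: %#lx\n", node_affinity(0x4UL, 0xffUL, 2));
	return 0;
}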

In the long run, this affinity vs. CPU hotplug operations game should
probably be implemented at the generic kthread level.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 79 +++++++++++++++++++++++++++++++++++++---
kernel/rcu/tree_plugin.h | 42 ++-------------------
2 files changed, 78 insertions(+), 43 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 060d418c2b44..e75ddf42e9b1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -145,7 +145,7 @@ static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
unsigned long gps, unsigned long flags);
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
@@ -4390,6 +4390,16 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
}

+static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
+{
+ struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
+
+ if (!kworker)
+ return NULL;
+
+ return kworker->task;
+}
+
static void __init rcu_start_exp_gp_kworker(void)
{
const char *name = "rcu_exp_gp_kthread_worker";
@@ -4414,6 +4424,11 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
{
}

+static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
+{
+ return NULL;
+}
+
static void __init rcu_start_exp_gp_kworker(void)
{
}
@@ -4492,13 +4507,67 @@ int rcutree_prepare_cpu(unsigned int cpu)
}

/*
- * Update RCU priority boot kthread affinity for CPU-hotplug changes.
+ * Update kthreads affinity during CPU-hotplug changes.
+ *
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ *
+ * Any future concurrent calls are serialized via ->kthread_mutex.
*/
-static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
{
- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ cpumask_var_t cm;
+ unsigned long mask;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+ struct task_struct *task_boost, *task_exp;

- rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+ if (!IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) && !IS_ENABLED(CONFIG_RCU_BOOST))
+ return;
+
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+
+ task_boost = rcu_boost_task(rnp);
+ task_exp = rcu_exp_par_gp_task(rnp);
+
+ /*
+ * If CPU is the boot one, those tasks are created later from early
+ * initcall since kthreadd must be created first.
+ */
+ if (!task_boost && !task_exp)
+ return;
+
+ if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
+ return;
+
+ mutex_lock(&rnp->kthread_mutex);
+ mask = rcu_rnp_online_cpus(rnp);
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
+ cpu != outgoingcpu)
+ cpumask_set_cpu(cpu, cm);
+ cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
+ if (cpumask_empty(cm)) {
+ cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
+ if (outgoingcpu >= 0)
+ cpumask_clear_cpu(outgoingcpu, cm);
+ }
+
+ if (task_exp)
+ set_cpus_allowed_ptr(task_exp, cm);
+
+ if (task_boost)
+ set_cpus_allowed_ptr(task_boost, cm);
+
+ mutex_unlock(&rnp->kthread_mutex);
+
+ free_cpumask_var(cm);
}

/*
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 09bdd36ca9ff..08246cca663f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1211,43 +1211,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}

-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
- struct task_struct *t = rnp->boost_kthread_task;
- unsigned long mask;
- cpumask_var_t cm;
- int cpu;
-
- if (!t)
- return;
- if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
- return;
- mutex_lock(&rnp->kthread_mutex);
- mask = rcu_rnp_online_cpus(rnp);
- for_each_leaf_node_possible_cpu(rnp, cpu)
- if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
- cpu != outgoingcpu)
- cpumask_set_cpu(cpu, cm);
- cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (cpumask_empty(cm)) {
- cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (outgoingcpu >= 0)
- cpumask_clear_cpu(outgoingcpu, cm);
- }
- set_cpus_allowed_ptr(t, cm);
- mutex_unlock(&rnp->kthread_mutex);
- free_cpumask_var(cm);
+ return READ_ONCE(rnp->boost_kthread_task);
}

#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1266,10 +1232,10 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static struct task_struct * rcu_boost_task(struct rcu_node *rnp)
{
+ return NULL;
}
-
#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
--
2.42.1

2023-12-08 22:07:06

by Frederic Weisbecker

Subject: [PATCH 5/8] rcu: s/boost_kthread_mutex/kthread_mutex

This mutex, which currently protects per-node boost kthread creation
and affinity setting across CPU hotplug operations, will soon also be
used for expedited kworkers.

Generalize its name to prepare for that.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.c | 2 +-
kernel/rcu/tree.h | 2 +-
kernel/rcu/tree_plugin.h | 10 +++++-----
3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 39679cf66c3a..996efaded5bf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4892,7 +4892,7 @@ static void __init rcu_init_one(void)
init_waitqueue_head(&rnp->exp_wq[2]);
init_waitqueue_head(&rnp->exp_wq[3]);
spin_lock_init(&rnp->exp_lock);
- mutex_init(&rnp->boost_kthread_mutex);
+ mutex_init(&rnp->kthread_mutex);
raw_spin_lock_init(&rnp->exp_poll_lock);
rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index e0e70b663cbf..f98a245e5f32 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -113,7 +113,7 @@ struct rcu_node {
/* side effect, not as a lock. */
unsigned long boost_time;
/* When to start boosting (jiffies). */
- struct mutex boost_kthread_mutex;
+ struct mutex kthread_mutex;
/* Exclusion for thread spawning and affinity */
/* manipulation. */
struct task_struct *boost_kthread_task;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 41021080ad25..0d307674915c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1195,7 +1195,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
struct sched_param sp;
struct task_struct *t;

- mutex_lock(&rnp->boost_kthread_mutex);
+ mutex_lock(&rnp->kthread_mutex);
if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
goto out;

@@ -1212,7 +1212,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */

out:
- mutex_unlock(&rnp->boost_kthread_mutex);
+ mutex_unlock(&rnp->kthread_mutex);
}

/*
@@ -1224,7 +1224,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
* no outgoing CPU. If there are no CPUs left in the affinity set,
* this function allows the kthread to execute on any CPU.
*
- * Any future concurrent calls are serialized via ->boost_kthread_mutex.
+ * Any future concurrent calls are serialized via ->kthread_mutex.
*/
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
@@ -1237,7 +1237,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
return;
if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
return;
- mutex_lock(&rnp->boost_kthread_mutex);
+ mutex_lock(&rnp->kthread_mutex);
mask = rcu_rnp_online_cpus(rnp);
for_each_leaf_node_possible_cpu(rnp, cpu)
if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
@@ -1250,7 +1250,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
cpumask_clear_cpu(outgoingcpu, cm);
}
set_cpus_allowed_ptr(t, cm);
- mutex_unlock(&rnp->boost_kthread_mutex);
+ mutex_unlock(&rnp->kthread_mutex);
free_cpumask_var(cm);
}

--
2.42.1

2023-12-11 16:39:15

by Paul E. McKenney

Subject: Re: [PATCH 0/8] rcu: Fix expedited GP deadlock (and cleanup some nocb stuff)

On Fri, Dec 08, 2023 at 11:05:37PM +0100, Frederic Weisbecker wrote:
> TREE04 can trigger a writer stall if run with memory pressure. This
> is due to a circular dependency between waiting for expedited grace
> period and polling on expedited grace period when workqueues go back
> to mayday serialization.
>
> Here is a proposal fix.

The torture.sh "acceptance test" with KCSAN and --duration 30 ran
fine except for this in TREE09:

kernel/rcu/tree_nocb.h:1785:13: error: unused function '__call_rcu_nocb_wake' [-Werror,-Wunused-function]

My guess is that the declaration of __call_rcu_nocb_wake() in
kernel/rcu/tree.h needs an "#ifdef CONFIG_SMP", but you might have a
better fix.

Thanx, Paul

> Frederic Weisbecker (8):
> rcu/nocb: Make IRQs disablement symetric
> rcu/nocb: Re-arrange call_rcu() NOCB specific code
> rcu/exp: Fix RCU expedited parallel grace period kworker allocation
> failure recovery
> rcu/exp: Handle RCU expedited grace period kworker allocation failure
> rcu: s/boost_kthread_mutex/kthread_mutex
> rcu/exp: Make parallel exp gp kworker per rcu node
> rcu/exp: Handle parallel exp gp kworkers affinity
> rcu/exp: Remove rcu_par_gp_wq
>
> kernel/rcu/rcu.h | 5 -
> kernel/rcu/tree.c | 222 +++++++++++++++++++++++++--------------
> kernel/rcu/tree.h | 12 +--
> kernel/rcu/tree_exp.h | 81 +++-----------
> kernel/rcu/tree_nocb.h | 38 ++++---
> kernel/rcu/tree_plugin.h | 52 ++-------
> 6 files changed, 191 insertions(+), 219 deletions(-)
>
> --
> 2.42.1
>

2023-12-11 20:04:17

by Frederic Weisbecker

Subject: Re: [PATCH 0/8] rcu: Fix expedited GP deadlock (and cleanup some nocb stuff)

Le Mon, Dec 11, 2023 at 08:38:59AM -0800, Paul E. McKenney a écrit :
> On Fri, Dec 08, 2023 at 11:05:37PM +0100, Frederic Weisbecker wrote:
> > TREE04 can trigger a writer stall if run with memory pressure. This
> > is due to a circular dependency between waiting for expedited grace
> > period and polling on expedited grace period when workqueues go back
> > to mayday serialization.
> >
> > Here is a proposal fix.
>
> The torture.sh "acceptance test" with KCSAN and --duration 30 ran
> fine except for this in TREE09:
>
> kernel/rcu/tree_nocb.h:1785:13: error: unused function '__call_rcu_nocb_wake' [-Werror,-Wunused-function]
>
> My guess is that the declaration of __call_rcu_nocb_wake() in
> kernel/rcu/tree.h needs an "#ifdef CONFIG_SMP", but you might have a
> better fix.

Could be because if CONFIG_RCU_NOCB_CPU=n, the function is only called
(though as dead code) from rcutree_migrate_callbacks() which in turn only
exists if CONFIG_HOTPLUG_CPU=y.

Something like that then:

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 35f7af331e6c..e1ff53d5084c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -445,6 +445,8 @@ static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+ unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
@@ -466,8 +468,6 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j, bool lazy);
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
rcu_callback_t func, unsigned long flags, bool lazy);
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
- unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);

2023-12-11 21:40:06

by Paul E. McKenney

Subject: Re: [PATCH 0/8] rcu: Fix expedited GP deadlock (and cleanup some nocb stuff)

On Mon, Dec 11, 2023 at 09:04:04PM +0100, Frederic Weisbecker wrote:
> Le Mon, Dec 11, 2023 at 08:38:59AM -0800, Paul E. McKenney a écrit :
> > On Fri, Dec 08, 2023 at 11:05:37PM +0100, Frederic Weisbecker wrote:
> > > TREE04 can trigger a writer stall if run with memory pressure. This
> > > is due to a circular dependency between waiting for expedited grace
> > > period and polling on expedited grace period when workqueues go back
> > > to mayday serialization.
> > >
> > > Here is a proposal fix.
> >
> > The torture.sh "acceptance test" with KCSAN and --duration 30 ran
> > fine except for this in TREE09:
> >
> > kernel/rcu/tree_nocb.h:1785:13: error: unused function '__call_rcu_nocb_wake' [-Werror,-Wunused-function]
> >
> > My guess is that the declaration of __call_rcu_nocb_wake() in
> > kernel/rcu/tree.h needs an "#ifdef CONFIG_SMP", but you might have a
> > better fix.
>
> Could be because if CONFIG_RCU_NO_CB_CPU=n, the function is only called
> (though as dead code) from rcutree_migrate_callbacks() which in turn only
> exists if CONFIG_HOTPLUG_CPU=y.
>
> Something like that then:
>
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index 35f7af331e6c..e1ff53d5084c 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -445,6 +445,8 @@ static void rcu_qs(void);
> static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
> #ifdef CONFIG_HOTPLUG_CPU
> static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
> +static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
> + unsigned long flags);
> #endif /* #ifdef CONFIG_HOTPLUG_CPU */
> static int rcu_print_task_exp_stall(struct rcu_node *rnp);
> static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
> @@ -466,8 +468,6 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> unsigned long j, bool lazy);
> static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
> rcu_callback_t func, unsigned long flags, bool lazy);
> -static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
> - unsigned long flags);
> static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
> static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
> static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);

This one passes TREE01 and TINY01, but on TREE09 still gets this:

kernel/rcu/tree_nocb.h:1785:13: error: ‘__call_rcu_nocb_wake’ defined but not used [-Werror=unused-function]

Huh. I suppose that there is always __maybe_unused?

Thanx, Paul

2023-12-12 13:34:25

by Frederic Weisbecker

Subject: Re: [PATCH 0/8] rcu: Fix expedited GP deadlock (and cleanup some nocb stuff)

On Mon, Dec 11, 2023 at 01:39:40PM -0800, Paul E. McKenney wrote:
> This one passes TREE01 and TINY01, but on TREE09 still gets this:
>
> kernel/rcu/tree_nocb.h:1785:13: error: ‘__call_rcu_nocb_wake’ defined but not used [-Werror=unused-function]
>
> Huh. I suppose that there is always __maybe_unused?

Looks like a good fit indeed!
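
For the record, a minimal standalone sketch (not the actual kernel
change, and __call_rcu_nocb_wake_sketch is a made-up name) of what the
annotation buys: the function may compile to dead code in some
configurations without tripping -Wunused-function under -Werror.

#define __maybe_unused __attribute__((__unused__))

static void __maybe_unused __call_rcu_nocb_wake_sketch(void)
{
	/* body irrelevant: only referenced in some config combinations */
}

int main(void)
{
	return 0;	/* builds cleanly with -Wall -Werror even if unused */
}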

Thanks!

> Thanx, Paul

2023-12-12 16:04:44

by Kalesh Singh

Subject: Re: [PATCH 3/8] rcu/exp: Fix RCU expedited parallel grace period kworker allocation failure recovery

On Fri, Dec 8, 2023 at 5:06 PM Frederic Weisbecker <[email protected]> wrote:
>
> Under CONFIG_RCU_EXP_KTHREAD=y, the nodes initialization for expedited
> grace periods is queued to a kworker. However if the allocation of that
> kworker failed, the nodes initialization is performed synchronously by
> the caller instead.
>
> Now the check for kworker initialization failure relies on the kworker
> pointer to be NULL while its value might actually encapsulate an
> allocation failure error.
>
> Make sure to handle this case.
>
> Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker")
> Cc: Kalesh Singh <[email protected]>
> Signed-off-by: Frederic Weisbecker <[email protected]>
> ---
> kernel/rcu/tree.c | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 82f8130d3fe3..055f4817bc70 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -4723,6 +4723,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
> rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
> if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
> pr_err("Failed to create %s!\n", par_gp_kworker_name);
> + rcu_exp_par_gp_kworker = NULL;
> kthread_destroy_worker(rcu_exp_gp_kworker);

Hi Frederic,

Thanks for catching this. I think we need to remove the
kthread_destroy_worker() in this case too.

Otherwise,

Reviewed-by: Kalesh Singh <[email protected]>

--Kalesh

> return;
> }
> --
> 2.42.1
>

2023-12-12 16:11:40

by Kalesh Singh

Subject: Re: [PATCH 3/8] rcu/exp: Fix RCU expedited parallel grace period kworker allocation failure recovery

On Tue, Dec 12, 2023 at 11:04 AM Kalesh Singh <[email protected]> wrote:
>
> On Fri, Dec 8, 2023 at 5:06 PM Frederic Weisbecker <[email protected]> wrote:
> >
> > Under CONFIG_RCU_EXP_KTHREAD=y, the nodes initialization for expedited
> > grace periods is queued to a kworker. However if the allocation of that
> > kworker failed, the nodes initialization is performed synchronously by
> > the caller instead.
> >
> > Now the check for kworker initialization failure relies on the kworker
> > pointer to be NULL while its value might actually encapsulate an
> > allocation failure error.
> >
> > Make sure to handle this case.
> >
> > Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker")
> > Cc: Kalesh Singh <[email protected]>
> > Signed-off-by: Frederic Weisbecker <[email protected]>
> > ---
> > kernel/rcu/tree.c | 1 +
> > 1 file changed, 1 insertion(+)
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 82f8130d3fe3..055f4817bc70 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -4723,6 +4723,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
> > rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
> > if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
> > pr_err("Failed to create %s!\n", par_gp_kworker_name);
> > + rcu_exp_par_gp_kworker = NULL;
> > kthread_destroy_worker(rcu_exp_gp_kworker);
>
> Hi Frederic,
>
> Thanks for catching this. I think we need to remove the
> kthread_destroy_worker() in this case too.

Ahh sorry, that's the other kworker. LGTM. Thanks.

>
> Otherwise,
>
> Reviewed-by: Kalesh Singh <[email protected]>
>
> --Kalesh
>
> > return;
> > }
> > --
> > 2.42.1
> >

2023-12-12 16:15:03

by Kalesh Singh

Subject: Re: [PATCH 4/8] rcu/exp: Handle RCU expedited grace period kworker allocation failure

On Fri, Dec 8, 2023 at 5:06 PM Frederic Weisbecker <[email protected]> wrote:
>
> Just like is done for the kworker performing nodes initialization,
> gracefully handle the possible allocation failure of the RCU expedited
> grace period main kworker.
>
> While at it perform a rename of the related checking functions to better
> reflect the expedited specifics.
>
> Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker")
> Cc: Kalesh Singh <[email protected]>
> Signed-off-by: Frederic Weisbecker <[email protected]>

Reviewed-by: Kalesh Singh <[email protected]>

Thanks,
Kalesh

> ---
> kernel/rcu/tree.c | 2 ++
> kernel/rcu/tree_exp.h | 25 +++++++++++++++++++------
> 2 files changed, 21 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 055f4817bc70..39679cf66c3a 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -4717,6 +4717,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
> rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
> if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
> pr_err("Failed to create %s!\n", gp_kworker_name);
> + rcu_exp_gp_kworker = NULL;
> return;
> }
>
> @@ -4725,6 +4726,7 @@ static void __init rcu_start_exp_gp_kworkers(void)
> pr_err("Failed to create %s!\n", par_gp_kworker_name);
> rcu_exp_par_gp_kworker = NULL;
> kthread_destroy_worker(rcu_exp_gp_kworker);
> + rcu_exp_gp_kworker = NULL;
> return;
> }
>
> diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
> index 6d7cea5d591f..cb31f4fb4b36 100644
> --- a/kernel/rcu/tree_exp.h
> +++ b/kernel/rcu/tree_exp.h
> @@ -429,7 +429,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
> __sync_rcu_exp_select_node_cpus(rewp);
> }
>
> -static inline bool rcu_gp_par_worker_started(void)
> +static inline bool rcu_exp_worker_started(void)
> +{
> + return !!READ_ONCE(rcu_exp_gp_kworker);
> +}
> +
> +static inline bool rcu_exp_par_worker_started(void)
> {
> return !!READ_ONCE(rcu_exp_par_gp_kworker);
> }
> @@ -479,7 +484,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
> __sync_rcu_exp_select_node_cpus(rewp);
> }
>
> -static inline bool rcu_gp_par_worker_started(void)
> +static inline bool rcu_exp_worker_started(void)
> +{
> + return !!READ_ONCE(rcu_gp_wq);
> +}
> +
> +static inline bool rcu_exp_par_worker_started(void)
> {
> return !!READ_ONCE(rcu_par_gp_wq);
> }
> @@ -542,7 +552,7 @@ static void sync_rcu_exp_select_cpus(void)
> rnp->exp_need_flush = false;
> if (!READ_ONCE(rnp->expmask))
> continue; /* Avoid early boot non-existent wq. */
> - if (!rcu_gp_par_worker_started() ||
> + if (!rcu_exp_par_worker_started() ||
> rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
> rcu_is_last_leaf_node(rnp)) {
> /* No worker started yet or last leaf, do direct call. */
> @@ -957,7 +967,7 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
> */
> void synchronize_rcu_expedited(void)
> {
> - bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
> + bool can_queue;
> unsigned long flags;
> struct rcu_exp_work rew;
> struct rcu_node *rnp;
> @@ -968,6 +978,9 @@ void synchronize_rcu_expedited(void)
> lock_is_held(&rcu_sched_lock_map),
> "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
>
> + can_queue = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
> + rcu_exp_worker_started();
> +
> /* Is the state is such that the call is a grace period? */
> if (rcu_blocking_is_gp()) {
> // Note well that this code runs with !PREEMPT && !SMP.
> @@ -997,7 +1010,7 @@ void synchronize_rcu_expedited(void)
> return; /* Someone else did our work for us. */
>
> /* Ensure that load happens before action based on it. */
> - if (unlikely(boottime)) {
> + if (unlikely(!can_queue)) {
> /* Direct call during scheduler init and early_initcalls(). */
> rcu_exp_sel_wait_wake(s);
> } else {
> @@ -1015,7 +1028,7 @@ void synchronize_rcu_expedited(void)
> /* Let the next expedited grace period start. */
> mutex_unlock(&rcu_state.exp_mutex);
>
> - if (likely(!boottime))
> + if (likely(can_queue))
> synchronize_rcu_expedited_destroy_work(&rew);
> }
> EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
> --
> 2.42.1
>

2023-12-13 13:01:57

by Neeraj Upadhyay (AMD)

[permalink] [raw]
Subject: Re: [PATCH 1/8] rcu/nocb: Make IRQs disablement symetric

On Sat, Dec 9, 2023 at 3:35 AM Frederic Weisbecker <[email protected]> wrote:
>
> Currently IRQs are disabled on call_rcu() and then depending on the
> context:
>
> * If the CPU is in nocb mode:
>
> - If the callback is enqueued in the bypass list, IRQs are re-enabled
> implicitly by rcu_nocb_try_bypass()
>
> - If the callback is enqueued in the normal list, IRQs are re-enabled
> implicitly by __call_rcu_nocb_wake()
>
> * If the CPU is NOT in nocb mode, IRQs are re-enabled explicitly from call_rcu()
>
> This makes the code a bit hard to follow, especially as it interleaves
> with nocb locking.
>
> To make the IRQ flags coverage clearer and also in order to prepare for
> moving all the nocb enqueue code to its own function, always re-enable
> the IRQ flags explicitly from call_rcu().
>
> Signed-off-by: Frederic Weisbecker <[email protected]>
> ---
> kernel/rcu/tree.c | 9 ++++++---
> kernel/rcu/tree_nocb.h | 20 +++++++++-----------
> 2 files changed, 15 insertions(+), 14 deletions(-)
>

Nit: s/symetric/symmetric/

Reviewed-by: Neeraj Upadhyay (AMD) <[email protected]>


Thanks
Neeraj

> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 52c2fdbc6363..74159c6d3bdf 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2697,8 +2697,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
> }
>
> check_cb_ovld(rdp);
> - if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
> + if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
> + local_irq_restore(flags);
> return; // Enqueued onto ->nocb_bypass, so just leave.
> + }
> // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
> rcu_segcblist_enqueue(&rdp->cblist, head);
> if (__is_kvfree_rcu_offset((unsigned long)func))
> @@ -2716,8 +2718,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
> __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
> } else {
> __call_rcu_core(rdp, head, flags);
> - local_irq_restore(flags);
> }
> + local_irq_restore(flags);
> }
>
> #ifdef CONFIG_RCU_LAZY
> @@ -4615,8 +4617,9 @@ void rcutree_migrate_callbacks(int cpu)
> __call_rcu_nocb_wake(my_rdp, true, flags);
> } else {
> rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
> - raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
> + raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
> }
> + local_irq_restore(flags);
> if (needwake)
> rcu_gp_kthread_wake();
> lockdep_assert_irqs_enabled();
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index 5598212d1f27..3f70fd0a2db4 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -532,9 +532,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> // 2. Both of these conditions are met:
> // a. The bypass list previously had only lazy CBs, and:
> // b. The new CB is non-lazy.
> - if (ncbs && (!bypass_is_lazy || lazy)) {
> - local_irq_restore(flags);
> - } else {
> + if (!ncbs || (bypass_is_lazy && !lazy)) {
> // No-CBs GP kthread might be indefinitely asleep, if so, wake.
> rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
> if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
> @@ -544,7 +542,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> } else {
> trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
> TPS("FirstBQnoWake"));
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> }
> }
> return true; // Callback already enqueued.
> @@ -570,7 +568,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
> // If we are being polled or there is no kthread, just leave.
> t = READ_ONCE(rdp->nocb_gp_kthread);
> if (rcu_nocb_poll || !t) {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
> TPS("WakeNotPoll"));
> return;
> @@ -583,17 +581,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
> rdp->qlen_last_fqs_check = len;
> // Only lazy CBs in bypass list
> if (lazy_len && bypass_len == lazy_len) {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
> TPS("WakeLazy"));
> } else if (!irqs_disabled_flags(flags)) {
> /* ... if queue was empty ... */
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> wake_nocb_gp(rdp, false);
> trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
> TPS("WakeEmpty"));
> } else {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
> TPS("WakeEmptyIsDeferred"));
> }
> @@ -611,15 +609,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
> if ((rdp->nocb_cb_sleep ||
> !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
> !timer_pending(&rdp->nocb_timer)) {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
> TPS("WakeOvfIsDeferred"));
> } else {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
> }
> } else {
> - rcu_nocb_unlock_irqrestore(rdp, flags);
> + rcu_nocb_unlock(rdp);
> trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
> }
> }
> --
> 2.42.1
>
>