Hi,
Some cleanups before converting (de-)offloading to only work on offline
CPUs.
Frederic Weisbecker (4):
rcu/nocb: Fix segcblist state machine comments about bypass
rcu/nocb: Fix segcblist state machine stale comments about timers
rcu/nocb: Use kthread parking instead of ad-hoc implementation
rcu/nocb: Remove buggy bypass lock contention mitigation
include/linux/rcu_segcblist.h | 88 +++++++++-----------
kernel/rcu/tree.h | 1 -
kernel/rcu/tree_nocb.h | 149 ++++++++++------------------------
3 files changed, 79 insertions(+), 159 deletions(-)
--
2.44.0
Upon NOCB deoffloading, the rcuo kthread must be forced to sleep
until the corresponding rdp is offloaded again. The deoffloader
clears the SEGCBLIST_OFFLOADED flag and wakes up the rcuo kthread,
which notices the change, clears its own SEGCBLIST_KTHREAD_CB flag in
turn, and goes back to sleep until it sees the SEGCBLIST_OFFLOADED
flag set again, should a re-offloading ever happen.
Upon NOCB offloading, the rcuo kthread must be forced to wake up and
handle callbacks until the corresponding rdp is deoffloaded again.
The offloader sets the SEGCBLIST_OFFLOADED flag and wakes up the rcuo
kthread, which notices the change, sets its own SEGCBLIST_KTHREAD_CB
flag in turn, and starts checking callbacks until it sees the
SEGCBLIST_OFFLOADED flag cleared again, should a de-offloading ever
happen.
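In code terms, the CB (rcuo) kthread side of this ad-hoc handshake boils
down to roughly the following (condensed from the nocb_cb_wait() code
removed in the diff below; the wakeups notifying the waiting
(de-)offloader are omitted):

	do {
		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
						    nocb_cb_wait_cond(rdp));
	} while (!nocb_cb_can_run(rdp));	/* sleep while SEGCBLIST_KTHREAD_CB is clear */

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		/* Offloading: acknowledge by setting our own flag, then handle callbacks. */
		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
	} else {
		/* De-offloading: acknowledge by clearing our flag, then sleep again. */
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
	}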
This is all a crude, ad-hoc and error-prone re-implementation of
kthread (un-)parking.
Consolidate the behaviour with the dedicated kthread parking API instead.
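For comparison, here is a minimal sketch of the generic kthread parking
pattern that this patch switches to (hypothetical names, not the actual
rcuo code):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* Hypothetical worker loop: park when requested, work otherwise. */
	static int my_worker(void *arg)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park()) {
				kthread_parkme();	/* sleep here until kthread_unpark() */
				continue;
			}
			/* ... wait for and handle work while unparked ... */
		}
		return 0;
	}

	/* Creation side: start running or parked depending on the initial state. */
	static struct task_struct *my_worker_create(void *arg, bool start_running)
	{
		struct task_struct *t = kthread_create(my_worker, arg, "my_worker");

		if (IS_ERR(t))
			return t;
		if (start_running)
			wake_up_process(t);
		else
			kthread_park(t);	/* stays parked until kthread_unpark() */
		return t;
	}

The (de-)offloaders then simply call kthread_park() and kthread_unpark()
on the rcuo kthread instead of driving the SEGCBLIST_KTHREAD_CB flag by
hand.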
Signed-off-by: Frederic Weisbecker <[email protected]>
---
include/linux/rcu_segcblist.h | 81 +++++++++++------------
kernel/rcu/tree_nocb.h | 117 ++++++++++------------------------
2 files changed, 71 insertions(+), 127 deletions(-)
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 8018045989af..ba95c06675e1 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -84,31 +84,31 @@ struct rcu_cblist {
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * --------------------------------------- ----------------------------------|
- * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
- * | | | |
- * | | | |
- * | CB kthread woke up and | | GP kthread woke up and |
- * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
- * | Processes callbacks concurrently | | |
- * | with rcu_core(), holding | | |
- * | nocb_lock. | | |
- * --------------------------------------- -----------------------------------
- * | |
- * -----------------------------------
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | + unparked CB kthread |
+ * | |
+ * | CB kthread got unparked and processes callbacks concurrently with |
+ * | rcu_core(), holding nocb_lock. |
+ * ---------------------------------------------------------------------------
+ * |
+ * v
+ * ---------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged SEGCBLIST_OFFLOADED. |
+ * ---------------------------------------- -----------------------------------
* |
* v
* |--------------------------------------------------------------------------|
- * | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP | |
- * | SEGCBLIST_KTHREAD_CB |
+ * | + unparked CB kthread |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. |
@@ -124,8 +124,8 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | ignores callbacks. Bypass enqueue is enabled. |
@@ -136,8 +136,8 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | handles callbacks concurrently. Bypass enqueue is disabled. |
@@ -149,40 +149,31 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary. |
+ * | holding nocb_lock. Wake up GP kthread if necessary. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * ---------------------------------------------------------------------------|
- * | | |
- * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
- * | | |
- * | GP kthread woke up and | CB kthread woke up and |
- * | acknowledged the fact that | acknowledged the fact that |
- * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
- * | | The CB kthread goes to sleep |
- * | The callbacks from the target CPU | until it ever gets re-offloaded. |
- * | will be ignored from the GP kthread | |
- * | loop. | |
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED |
+ * | got cleared. The callbacks from the target CPU will be ignored from the|
+ * | GP kthread loop. |
* ----------------------------------------------------------------------------
- * | |
- * -----------------------------------
* |
* v
* ----------------------------------------------------------------------------
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | + parked CB kthread |
* | |
- * | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. |
+ * | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or |
+ * | local rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3f85577bddd4..808c9a19fe1d 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -635,8 +635,7 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
}
}
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
- bool *wake_state)
+static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
{
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
@@ -650,8 +649,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
* We will handle this rdp until it ever gets de-offloaded.
*/
rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
- *wake_state = true;
ret = 1;
} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
@@ -660,8 +657,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
* We will ignore this rdp until it ever gets re-offloaded.
*/
rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
- *wake_state = true;
ret = 0;
} else {
WARN_ON_ONCE(1);
@@ -877,16 +872,15 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
}
if (rdp_toggling) {
- bool wake_state = false;
int ret;
- ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
+ ret = nocb_gp_toggle_rdp(rdp_toggling);
if (ret == 1)
list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
else if (ret == 0)
list_del(&rdp_toggling->nocb_entry_rdp);
- if (wake_state)
- swake_up_one(&rdp_toggling->nocb_state_wq);
+
+ swake_up_one(&rdp_toggling->nocb_state_wq);
}
my_rdp->nocb_gp_seq = -1;
@@ -913,16 +907,9 @@ static int rcu_nocb_gp_kthread(void *arg)
return 0;
}
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
- u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-
- return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
- return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+ return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
}
/*
@@ -934,21 +921,19 @@ static void nocb_cb_wait(struct rcu_data *rdp)
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long cur_gp_seq;
unsigned long flags;
- bool needwake_state = false;
bool needwake_gp = false;
- bool can_sleep = true;
struct rcu_node *rnp = rdp->mynode;
- do {
- swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
- nocb_cb_wait_cond(rdp));
-
- if (READ_ONCE(rdp->nocb_cb_sleep)) {
- WARN_ON(signal_pending(current));
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
- }
- } while (!nocb_cb_can_run(rdp));
+ swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+ nocb_cb_wait_cond(rdp));
+ if (kthread_should_park()) {
+ kthread_parkme();
+ } else if (READ_ONCE(rdp->nocb_cb_sleep)) {
+ WARN_ON(signal_pending(current));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+ }
+ WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
local_irq_save(flags);
rcu_momentary_dyntick_idle();
@@ -971,37 +956,16 @@ static void nocb_cb_wait(struct rcu_data *rdp)
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
}
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
- rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
- needwake_state = true;
- }
- if (rcu_segcblist_ready_cbs(cblist))
- can_sleep = false;
- } else {
- /*
- * De-offloading. Clear our flag and notify the de-offload worker.
- * We won't touch the callbacks and keep sleeping until we ever
- * get re-offloaded.
- */
- WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
- needwake_state = true;
- }
-
- WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
- if (rdp->nocb_cb_sleep)
+ if (!rcu_segcblist_ready_cbs(cblist)) {
+ WRITE_ONCE(rdp->nocb_cb_sleep, true);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
+ } else {
+ WRITE_ONCE(rdp->nocb_cb_sleep, false);
+ }
rcu_nocb_unlock_irqrestore(rdp, flags);
if (needwake_gp)
rcu_gp_kthread_wake();
-
- if (needwake_state)
- swake_up_one(&rdp->nocb_state_wq);
}
/*
@@ -1094,17 +1058,8 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
bool wake_gp = false;
rcu_segcblist_offload(cblist, offload);
-
- if (rdp->nocb_cb_sleep)
- rdp->nocb_cb_sleep = false;
rcu_nocb_unlock_irqrestore(rdp, flags);
- /*
- * Ignore former value of nocb_cb_sleep and force wake up as it could
- * have been spuriously set to false already.
- */
- swake_up_one(&rdp->nocb_cb_wq);
-
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
// Queue this rdp for add/del to/from the list to iterate on rcuog
WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
@@ -1161,19 +1116,11 @@ static long rcu_nocb_rdp_deoffload(void *arg)
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
- /*
- * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
- * Just wait SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
- */
- if (!rdp->nocb_cb_kthread) {
- rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
- rcu_nocb_unlock_irqrestore(rdp, flags);
- }
-
swait_event_exclusive(rdp->nocb_state_wq,
- !rcu_segcblist_test_flags(cblist,
- SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
+ !rcu_segcblist_test_flags(cblist,
+ SEGCBLIST_KTHREAD_GP));
+ if (rdp->nocb_cb_kthread)
+ kthread_park(rdp->nocb_cb_kthread);
} else {
/*
* No kthread to clear the flags for us or remove the rdp from the nocb list
@@ -1181,8 +1128,7 @@ static long rcu_nocb_rdp_deoffload(void *arg)
* but we stick to paranoia in this rare path.
*/
rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(&rdp->cblist,
- SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+ rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
rcu_nocb_unlock_irqrestore(rdp, flags);
list_del(&rdp->nocb_entry_rdp);
@@ -1282,8 +1228,10 @@ static long rcu_nocb_rdp_offload(void *arg)
wake_gp = rdp_offload_toggle(rdp, true, flags);
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
+
+ kthread_unpark(rdp->nocb_cb_kthread);
+
swait_event_exclusive(rdp->nocb_state_wq,
- rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
/*
@@ -1468,7 +1416,7 @@ void __init rcu_init_nohz(void)
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist, true);
- rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
}
rcu_organize_nocb_kthreads();
@@ -1526,11 +1474,16 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
/* Spawn the kthread for this CPU. */
- t = kthread_run(rcu_nocb_cb_kthread, rdp,
- "rcuo%c/%d", rcu_state.abbr, cpu);
+ t = kthread_create(rcu_nocb_cb_kthread, rdp,
+ "rcuo%c/%d", rcu_state.abbr, cpu);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
goto end;
+ if (rcu_rdp_is_offloaded(rdp))
+ wake_up_process(t);
+ else
+ kthread_park(t);
+
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
--
2.44.0
The bypass lock contention mitigation assumes there can be at most
2 contenders on the bypass lock, following this scheme (condensed in
code right after the list):
1) One kthread takes the bypass lock
2) Another one spins on it and increments the contended counter
3) A third one (a bypass enqueuer) sees the contended counter set and
   busy-loops, waiting for it to decrement.
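For reference, a condensed form of that scheme, as implemented by the
code removed further below (memory barriers and warnings stripped):

	/* Steps 1) and 2): take the lock, flagging contention while spinning. */
	static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	{
		if (raw_spin_trylock(&rdp->nocb_bypass_lock))
			return;
		atomic_inc(&rdp->nocb_lock_contended);	/* step 2: flag the contention */
		raw_spin_lock(&rdp->nocb_bypass_lock);
		atomic_dec(&rdp->nocb_lock_contended);
	}

	/* Step 3): the bypass enqueuer throttles itself while contention is flagged. */
	static void rcu_nocb_wait_contended(struct rcu_data *rdp)
	{
		while (atomic_read(&rdp->nocb_lock_contended))
			cpu_relax();
	}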
However this assumption is wrong. Only one CPU can ever find the lock
contended, because call_rcu() (the bypass enqueuer) is the only bypass
lock acquire site that may not already hold the NOCB lock beforehand;
all the other sites must first contend on the NOCB lock. Therefore
step 2) is impossible.
The other problem is that the mitigation assumes that contenders all
belong to the same rdp CPU, which is also impossible for a raw
spinlock: since the lock is taken with IRQs disabled, a second
contender can never run on the same CPU.
In theory the warning could trigger if the enqueuer holds the bypass
lock while another CPU flushes the bypass queue concurrently, but this
is prevented by all the flush users:
1) NOCB kthreads only flush if they successfully _tried_ to lock the
   bypass lock. So no contention management here.
2) Flush on callbacks migration happens remotely when the CPU is
   offline. No concurrency against bypass enqueue.
3) Flush on deoffloading happens either locally with IRQs disabled or
   remotely when the CPU is not yet online. No concurrency against
   bypass enqueue.
4) Flush on barrier entrain happens either locally with IRQs disabled
   or remotely when the CPU is offline. No concurrency against
   bypass enqueue.
For those reasons, the bypass lock contention mitigation isn't needed
and is even wrong. Remove it, but keep the warning that reports a
bypass lock contended from a remote CPU, in order to retain awareness
of unexpected contention.
Signed-off-by: Frederic Weisbecker <[email protected]>
---
kernel/rcu/tree.h | 1 -
kernel/rcu/tree_nocb.h | 32 ++++++--------------------------
2 files changed, 6 insertions(+), 27 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index affcb92a358c..fcf2b4aa3441 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -223,7 +223,6 @@ struct rcu_data {
struct swait_queue_head nocb_state_wq; /* For offloading state changes */
struct task_struct *nocb_gp_kthread;
raw_spinlock_t nocb_lock; /* Guard following pair of fields. */
- atomic_t nocb_lock_contended; /* Contention experienced. */
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
struct timer_list nocb_timer; /* Enforce finite deferral. */
unsigned long nocb_gp_adv_time; /* Last call_rcu() CB adv (jiffies). */
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 808c9a19fe1d..3ce30841119a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
/*
* Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
- * lock isn't immediately available, increment ->nocb_lock_contended to
- * flag the contention.
+ * lock isn't immediately available, perform minimal sanity check.
*/
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
__acquires(&rdp->nocb_bypass_lock)
@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
lockdep_assert_irqs_disabled();
if (raw_spin_trylock(&rdp->nocb_bypass_lock))
return;
- atomic_inc(&rdp->nocb_lock_contended);
+ /*
+ * Contention expected only when local enqueue collides with
+ * remote flush from kthreads.
+ */
WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
- smp_mb__after_atomic(); /* atomic_inc() before lock. */
raw_spin_lock(&rdp->nocb_bypass_lock);
- smp_mb__before_atomic(); /* atomic_dec() after lock. */
- atomic_dec(&rdp->nocb_lock_contended);
-}
-
-/*
- * Spinwait until the specified rcu_data structure's ->nocb_lock is
- * not contended. Please note that this is extremely special-purpose,
- * relying on the fact that at most two kthreads and one CPU contend for
- * this lock, and also that the two kthreads are guaranteed to have frequent
- * grace-period-duration time intervals between successive acquisitions
- * of the lock. This allows us to use an extremely simple throttling
- * mechanism, and further to apply it only to the CPU doing floods of
- * call_rcu() invocations. Don't try this at home!
- */
-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
-{
- WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
- while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
- cpu_relax();
}
/*
@@ -510,7 +492,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
}
// We need to use the bypass.
- rcu_nocb_wait_contended(rdp);
rcu_nocb_bypass_lock(rdp);
ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
@@ -1631,12 +1612,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
- pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+ pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
rdp->cpu, rdp->nocb_gp_rdp->cpu,
nocb_next_rdp ? nocb_next_rdp->cpu : -1,
"kK"[!!rdp->nocb_cb_kthread],
"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
- "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
"sS"[!!rdp->nocb_cb_sleep],
".W"[swait_active(&rdp->nocb_cb_wq)],
--
2.44.0
The (de-)offloading process used to take care of the NOCB timer back
when the timer depended on the per-rdp NOCB locking. However this
hasn't been the case for a long while. The timer can now safely be
armed and fire during the (de-)offloading process, which doesn't care
about it anymore.
Update the comments accordingly.
Signed-off-by: Frederic Weisbecker <[email protected]>
---
include/linux/rcu_segcblist.h | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 1e5b4ef167ca..8018045989af 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -80,8 +80,7 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
- * | allowing nocb_timer to be armed. |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. |
* ----------------------------------------------------------------------------
* |
* v
@@ -183,8 +182,7 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
- * | Flush pending nocb_timer. |
+ * | rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
--
2.44.0
On Thu, Apr 25, 2024 at 04:18:31PM +0200, Frederic Weisbecker wrote:
> Hi,
>
> Some cleanups before converting (de-)offloading to only work on offline
> CPUs.
Seeing no objections, I have queued this series for testing and
review.
Thanx, Paul
> Frederic Weisbecker (4):
> rcu/nocb: Fix segcblist state machine comments about bypass
> rcu/nocb: Fix segcblist state machine stale comments about timers
> rcu/nocb: Use kthread parking instead of ad-hoc implementation
> rcu/nocb: Remove buggy bypass lock contention mitigation
>
> include/linux/rcu_segcblist.h | 88 +++++++++-----------
> kernel/rcu/tree.h | 1 -
> kernel/rcu/tree_nocb.h | 149 ++++++++++------------------------
> 3 files changed, 79 insertions(+), 159 deletions(-)
>
> --
> 2.44.0
>