2018-08-29 22:55:33

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 0/24] Additional RCU-consolidation cleanups for v4.20/v5.0

Hello!

This series contains yet more cleanups for RCU flavor consolidation:

1. Inline the now-trivial increment_cpu_stall_ticks() into its sole
caller.

2. Pull the rcu_gp_kthread() function's force-quiescent-state loop
into a separate function.

3. Consolidate RCU-bh update-side function definitions, putting
them all in one place at the end of rcupdate.h.

4. Consolidate RCU-sched update-side function definitions, putting
them all in one place at the end of rcupdate.h (the resulting
wrapper pattern is sketched just after this list).

5. Add RCU-bh and RCU-sched support for extended readers, so that
rcutorture will mix and match all RCU read-side primitives.

6. Stop testing RCU-bh and RCU-sched.

7-8. Remove the "rcu_bh" and "sched" torture types.

9. Remove now-unused rcutorture APIs.

10-21. Clean up flavor-related definitions and comments.

22. Remove !PREEMPT code from rcu_note_voluntary_context_switch(),
since it now has meaning only in PREEMPT kernels.

23. Define rcu_all_qs() only in !PREEMPT builds, since that is
now the only place it is used.

24. Inline _rcu_barrier() into its sole remaining caller.
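
For reference, the wrapper pattern that items 3-4 consolidate reduces
to roughly the following stand-alone sketch. The function bodies are
userspace stand-ins rather than the kernel implementation; the point
is only that the old -bh/-sched names now forward to their
consolidated RCU counterparts:

#include <stdio.h>

/* Stand-ins for the consolidated RCU update-side primitives. */
static void synchronize_rcu(void) { puts("synchronize_rcu()"); }
static void rcu_barrier(void) { puts("rcu_barrier()"); }

/* Transitional wrappers of the kind gathered at the end of rcupdate.h. */
static inline void synchronize_rcu_bh(void) { synchronize_rcu(); }
static inline void synchronize_sched(void) { synchronize_rcu(); }
static inline void rcu_barrier_bh(void) { rcu_barrier(); }
static inline void rcu_barrier_sched(void) { rcu_barrier(); }

int main(void)
{
	synchronize_rcu_bh();	/* Forwards to synchronize_rcu(). */
	synchronize_sched();	/* Likewise. */
	rcu_barrier_bh();	/* Forwards to rcu_barrier(). */
	rcu_barrier_sched();	/* Likewise. */
	return 0;
}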

Thanx, Paul

------------------------------------------------------------------------

Documentation/admin-guide/kernel-parameters.txt            |   6
include/linux/rculist.h                                    |  32 -
include/linux/rcupdate.h                                   | 100 ++-
include/linux/rcupdate_wait.h                              |  14
include/linux/rcutiny.h                                    |  47 -
include/linux/rcutree.h                                    |  28
include/trace/events/rcu.h                                 |  20
kernel/rcu/Kconfig                                         |  20
kernel/rcu/rcu.h                                           |  28
kernel/rcu/rcuperf.c                                       |  65 --
kernel/rcu/rcutorture.c                                    | 125 ---
kernel/rcu/srcutree.c                                      |   2
kernel/rcu/tiny.c                                          |  10
kernel/rcu/tree.c                                          | 414 +++-----
kernel/rcu/tree.h                                          |   5
kernel/rcu/tree_exp.h                                      |  22
kernel/rcu/tree_plugin.h                                   |  83 +-
kernel/rcu/update.c                                        |  58 -
tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot |   2
tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot |   2
tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot |   2
tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot |   2
tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot |   2
tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot |   2
24 files changed, 366 insertions(+), 725 deletions(-)



2018-08-29 22:55:41

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 23/24] rcu: Define rcu_all_qs() only in !PREEMPT builds

Now that rcu_all_qs() is used only in !PREEMPT builds, move it to
tree_plugin.h so that it is defined only in those builds. This in
turn means that rcu_momentary_dyntick_idle() is only used in !PREEMPT
builds, but it is simply marked __maybe_unused in order to keep it
near the rest of the dyntick-idle code.
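
As a stand-alone illustration of the __maybe_unused idiom used here
(hypothetical names, with CONFIG_PREEMPT as a plain compile-time
define rather than the real Kconfig plumbing):

#include <stdio.h>

/* The kernel's __maybe_unused expands to this attribute. */
#define __maybe_unused __attribute__((__unused__))

/*
 * Kept unconditionally so that it stays near related code, even though
 * only the !PREEMPT configuration below calls it; __maybe_unused
 * suppresses the -Wunused-function warning in PREEMPT builds.
 */
static void __maybe_unused momentary_quiescent_state(void)
{
	puts("heavy-weight quiescent state");
}

#ifndef CONFIG_PREEMPT
/* Defined only in !PREEMPT builds, mirroring rcu_all_qs(). */
static void all_qs(void)
{
	momentary_quiescent_state();
}
#endif

int main(void)
{
#ifndef CONFIG_PREEMPT
	all_qs();
#endif
	return 0;
}

Building with "cc -Wall demo.c" and with "cc -Wall -DCONFIG_PREEMPT
demo.c" shows the unused-function warning suppressed either way.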

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcutree.h | 2 ++
kernel/rcu/tree.c | 41 +---------------------------------------
kernel/rcu/tree_plugin.h | 39 ++++++++++++++++++++++++++++++++++++++
3 files changed, 42 insertions(+), 40 deletions(-)

diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d09a9abe9440..7f83179177d1 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -66,7 +66,9 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT
void rcu_all_qs(void);
+#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e8fbb7ee76cc..5f2a12a65b42 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -356,7 +356,7 @@ bool rcu_eqs_special_set(int cpu)
*
* The caller must have disabled interrupts and must not be idle.
*/
-static void rcu_momentary_dyntick_idle(void)
+static void __maybe_unused rcu_momentary_dyntick_idle(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
int special;
@@ -381,45 +381,6 @@ static int rcu_is_cpu_rrupt_from_idle(void)
__this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}

-/*
- * Register an urgently needed quiescent state. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs, which will in
- * some cases serve for expedited as well as normal grace periods.
- * Either way, register a lightweight quiescent state.
- *
- * The barrier() calls are redundant in the common case when this is
- * called externally, but just in case this is called from within this
- * file.
- *
- */
-void rcu_all_qs(void)
-{
- unsigned long flags;
-
- if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
- return;
- preempt_disable();
- /* Load rcu_urgent_qs before other flags. */
- if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
- preempt_enable();
- return;
- }
- this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
- barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
- local_irq_save(flags);
- rcu_momentary_dyntick_idle();
- local_irq_restore(flags);
- }
- if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp)))
- rcu_qs();
- this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
- barrier(); /* Avoid RCU read-side critical sections leaking up. */
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(rcu_all_qs);
-
#define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cd4c1b979446..7add1c297500 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -947,6 +947,45 @@ static void rcu_qs(void)
rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

+/*
+ * Register an urgently needed quiescent state. If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs, which will in
+ * some cases serve for expedited as well as normal grace periods.
+ * Either way, register a lightweight quiescent state.
+ *
+ * The barrier() calls are redundant in the common case when this is
+ * called externally, but just in case this is called from within this
+ * file.
+ *
+ */
+void rcu_all_qs(void)
+{
+ unsigned long flags;
+
+ if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+ return;
+ preempt_disable();
+ /* Load rcu_urgent_qs before other flags. */
+ if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+ preempt_enable();
+ return;
+ }
+ this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
+ if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+ local_irq_save(flags);
+ rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
+ if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp)))
+ rcu_qs();
+ this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
/*
* Note a PREEMPT=n context switch. The caller must have disabled interrupts.
*/
--
2.17.1


2018-08-29 22:55:49

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 21/24] rcu: Clean up flavor-related definitions and comments in update.c

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/update.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index ee366faecea6..fa089ead4bd6 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -332,7 +332,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
int i;
int j;

- /* Initialize and register callbacks for each flavor specified. */
+ /* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) {
if (checktiny &&
(crcu_array[i] == call_rcu ||
@@ -697,19 +697,19 @@ static int __noreturn rcu_tasks_kthread(void *arg)

/*
* Wait for all pre-existing t->on_rq and t->nvcsw
- * transitions to complete. Invoking synchronize_sched()
+ * transitions to complete. Invoking synchronize_rcu()
* suffices because all these transitions occur with
- * interrupts disabled. Without this synchronize_sched(),
+ * interrupts disabled. Without this synchronize_rcu(),
* a read-side critical section that started before the
* grace period might be incorrectly seen as having started
* after the grace period.
*
- * This synchronize_sched() also dispenses with the
+ * This synchronize_rcu() also dispenses with the
* need for a memory barrier on the first store to
* ->rcu_tasks_holdout, as it forces the store to happen
* after the beginning of the grace period.
*/
- synchronize_sched();
+ synchronize_rcu();

/*
* There were callbacks, so we need to wait for an
@@ -736,7 +736,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* This does only part of the job, ensuring that all
* tasks that were previously exiting reach the point
* where they have disabled preemption, allowing the
- * later synchronize_sched() to finish the job.
+ * later synchronize_rcu() to finish the job.
*/
synchronize_srcu(&tasks_rcu_exit_srcu);

@@ -786,20 +786,20 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* cause their RCU-tasks read-side critical sections to
* extend past the end of the grace period. However,
* because these ->nvcsw updates are carried out with
- * interrupts disabled, we can use synchronize_sched()
+ * interrupts disabled, we can use synchronize_rcu()
* to force the needed ordering on all such CPUs.
*
- * This synchronize_sched() also confines all
+ * This synchronize_rcu() also confines all
* ->rcu_tasks_holdout accesses to be within the grace
* period, avoiding the need for memory barriers for
* ->rcu_tasks_holdout accesses.
*
- * In addition, this synchronize_sched() waits for exiting
+ * In addition, this synchronize_rcu() waits for exiting
* tasks to complete their final preempt_disable() region
* of execution, cleaning up after the synchronize_srcu()
* above.
*/
- synchronize_sched();
+ synchronize_rcu();

/* Invoke the callbacks. */
while (list) {
--
2.17.1


2018-08-29 22:56:00

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 10/24] rcu: Clean up flavor-related definitions and comments in rcupdate.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcupdate.h | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 12103e1bbe67..d6d543b60a9f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -119,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
*
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
* will tell RCU that it needs to pay attention, invoke its argument
@@ -163,7 +162,7 @@ void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
+#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
@@ -309,8 +308,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_sched, and __srcu), should this make sense in the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
*/

#ifdef __CHECKER__
@@ -670,9 +669,8 @@ static inline void rcu_read_unlock(void)
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
* This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that synchronize_rcu() and friends may be used for the update
- * side, although synchronize_rcu_bh() is available as a wrapper in the
- * short term. Longer term, the _bh update-side API will be eliminated.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -705,10 +703,9 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
--
2.17.1


2018-08-29 22:56:08

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 02/24] rcu: Pull rcu_gp_kthread() FQS loop into separate function

The rcu_gp_kthread() function is long and deeply indented, so this
commit pulls the loop that repeatedly invokes rcu_gp_fqs() into a new
rcu_gp_fqs_loop() function.
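
The shape of the refactoring, reduced to a stand-alone sketch with
stand-in names rather than the kernel code:

#include <stdio.h>

/* Stand-in for one quiescent-state-forcing pass. */
static int force_qs_pass(int remaining)
{
	printf("forcing, %d passes left\n", remaining);
	return remaining - 1;
}

/* The formerly nested inner loop, pulled out as rcu_gp_fqs_loop() is. */
static void fqs_loop(void)
{
	int remaining = 3;

	while (remaining > 0)
		remaining = force_qs_pass(remaining);
}

/* The top-level kthread loop now reads as a sequence of phases. */
static void gp_kthread(void)
{
	/* ... wait for a grace period to start ... */
	fqs_loop();	/* Handle quiescent-state forcing. */
	/* ... clean up after the grace period ... */
}

int main(void)
{
	gp_kthread();
	return 0;
}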

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tree.c | 125 ++++++++++++++++++++++++----------------------
1 file changed, 66 insertions(+), 59 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 35a4346c7cce..bfb6b15bc27c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1975,6 +1975,71 @@ static void rcu_gp_fqs(bool first_time)
}
}

+/*
+ * Loop doing repeated quiescent-state forcing until the grace period ends.
+ */
+static void rcu_gp_fqs_loop(void)
+{
+ bool first_gp_fqs;
+ int gf;
+ unsigned long j;
+ int ret;
+ struct rcu_node *rnp = rcu_get_root();
+
+ first_gp_fqs = true;
+ j = jiffies_till_first_fqs;
+ ret = 0;
+ for (;;) {
+ if (!ret) {
+ rcu_state.jiffies_force_qs = jiffies + j;
+ WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
+ jiffies + 3 * j);
+ }
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqswait"));
+ rcu_state.gp_state = RCU_GP_WAIT_FQS;
+ ret = swait_event_idle_timeout_exclusive(
+ rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
+ rcu_state.gp_state = RCU_GP_DOING_FQS;
+ /* Locking provides needed memory barriers. */
+ /* If grace period done, leave loop. */
+ if (!READ_ONCE(rnp->qsmask) &&
+ !rcu_preempt_blocked_readers_cgp(rnp))
+ break;
+ /* If time for quiescent-state forcing, do it. */
+ if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
+ (gf & RCU_GP_FLAG_FQS)) {
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqsstart"));
+ rcu_gp_fqs(first_gp_fqs);
+ first_gp_fqs = false;
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqsend"));
+ cond_resched_tasks_rcu_qs();
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ ret = 0; /* Force full wait till next FQS. */
+ j = jiffies_till_next_fqs;
+ } else {
+ /* Deal with stray signal. */
+ cond_resched_tasks_rcu_qs();
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ WARN_ON(signal_pending(current));
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqswaitsig"));
+ ret = 1; /* Keep old FQS timing. */
+ j = jiffies;
+ if (time_after(jiffies, rcu_state.jiffies_force_qs))
+ j = 1;
+ else
+ j = rcu_state.jiffies_force_qs - j;
+ }
+ }
+}
+
/*
* Clean up after the old grace period.
*/
@@ -2065,12 +2130,6 @@ static void rcu_gp_cleanup(void)
*/
static int __noreturn rcu_gp_kthread(void *unused)
{
- bool first_gp_fqs;
- int gf;
- unsigned long j;
- int ret;
- struct rcu_node *rnp = rcu_get_root();
-
rcu_bind_gp_kthread();
for (;;) {

@@ -2096,59 +2155,7 @@ static int __noreturn rcu_gp_kthread(void *unused)
}

/* Handle quiescent-state forcing. */
- first_gp_fqs = true;
- j = jiffies_till_first_fqs;
- ret = 0;
- for (;;) {
- if (!ret) {
- rcu_state.jiffies_force_qs = jiffies + j;
- WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
- jiffies + 3 * j);
- }
- trace_rcu_grace_period(rcu_state.name,
- READ_ONCE(rcu_state.gp_seq),
- TPS("fqswait"));
- rcu_state.gp_state = RCU_GP_WAIT_FQS;
- ret = swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
- rcu_gp_fqs_check_wake(&gf), j);
- rcu_state.gp_state = RCU_GP_DOING_FQS;
- /* Locking provides needed memory barriers. */
- /* If grace period done, leave loop. */
- if (!READ_ONCE(rnp->qsmask) &&
- !rcu_preempt_blocked_readers_cgp(rnp))
- break;
- /* If time for quiescent-state forcing, do it. */
- if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
- (gf & RCU_GP_FLAG_FQS)) {
- trace_rcu_grace_period(rcu_state.name,
- READ_ONCE(rcu_state.gp_seq),
- TPS("fqsstart"));
- rcu_gp_fqs(first_gp_fqs);
- first_gp_fqs = false;
- trace_rcu_grace_period(rcu_state.name,
- READ_ONCE(rcu_state.gp_seq),
- TPS("fqsend"));
- cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rcu_state.gp_activity, jiffies);
- ret = 0; /* Force full wait till next FQS. */
- j = jiffies_till_next_fqs;
- } else {
- /* Deal with stray signal. */
- cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rcu_state.gp_activity, jiffies);
- WARN_ON(signal_pending(current));
- trace_rcu_grace_period(rcu_state.name,
- READ_ONCE(rcu_state.gp_seq),
- TPS("fqswaitsig"));
- ret = 1; /* Keep old FQS timing. */
- j = jiffies;
- if (time_after(jiffies,
- rcu_state.jiffies_force_qs))
- j = 1;
- else
- j = rcu_state.jiffies_force_qs - j;
- }
- }
+ rcu_gp_fqs_loop();

/* Handle grace-period end. */
rcu_state.gp_state = RCU_GP_CLEANUP;
--
2.17.1


2018-08-29 22:56:19

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 04/24] rcu: Consolidate RCU-sched update-side function definitions

This commit saves a few lines by consolidating the RCU-sched function
definitions at the end of include/linux/rcupdate.h. This consolidation
also makes it easier to remove them all when the time comes.
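
As a rough illustration of the get-state/cond-synchronize pairing
that these wrappers forward to, here is a deliberately simplified
userspace model; the real API is built on rcu_seq sequence counters,
which are elided here:

#include <stdio.h>

/* Toy grace-period counter standing in for RCU's internal state. */
static unsigned long gp_counter;

static unsigned long get_state_synchronize_rcu(void)
{
	return gp_counter;	/* Snapshot current grace-period state. */
}

static void synchronize_rcu(void)
{
	gp_counter++;		/* Pretend a full grace period elapsed. */
}

/* Wait only if no grace period has elapsed since the snapshot. */
static void cond_synchronize_rcu(unsigned long oldstate)
{
	if (oldstate == gp_counter)
		synchronize_rcu();
}

/* The transitional RCU-sched names now simply forward. */
static inline unsigned long get_state_synchronize_sched(void)
{
	return get_state_synchronize_rcu();
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

int main(void)
{
	unsigned long s = get_state_synchronize_sched();

	/* ... updater work that might overlap a grace period ... */
	cond_synchronize_sched(s);	/* Waits only if needed. */
	printf("grace periods elapsed: %lu\n", gp_counter);
	return 0;
}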

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcupdate.h | 38 +++++++++++++++++++++-----
include/linux/rcutiny.h | 32 +---------------------
include/linux/rcutree.h | 9 -------
kernel/rcu/tree.c | 58 ----------------------------------------
4 files changed, 32 insertions(+), 105 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e530f5739033..12103e1bbe67 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,12 +48,6 @@
#define ulong2long(a) (*(long *)(&(a)))

/* Exported common interfaces */
-
-#ifndef CONFIG_TINY_RCU
-void synchronize_sched(void);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-#endif
-
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);
@@ -170,7 +164,7 @@ void exit_tasks_rcu_finish(void);
#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -892,4 +886,34 @@ static inline void rcu_barrier_bh(void)
rcu_barrier();
}

+static inline void synchronize_sched(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
+
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index df82bada9b19..7fa4fb9e899e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -36,11 +36,6 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

-static inline void synchronize_sched(void)
-{
- synchronize_rcu();
-}
-
static inline unsigned long get_state_synchronize_rcu(void)
{
return 0;
@@ -51,36 +46,11 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}

-static inline unsigned long get_state_synchronize_sched(void)
-{
- return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
- might_sleep();
-}
-
extern void rcu_barrier(void);

-static inline void rcu_barrier_sched(void)
-{
- rcu_barrier(); /* Only one CPU, so only one list of callbacks! */
-}
-
static inline void synchronize_rcu_expedited(void)
{
- synchronize_sched();
-}
-
-static inline void synchronize_sched_expedited(void)
-{
- synchronize_sched();
-}
-
-static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
- call_rcu(head, func);
+ synchronize_rcu();
}

static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 94820156aa62..d09a9abe9440 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -46,21 +46,12 @@ static inline void rcu_virt_note_context_switch(int cpu)
}

void synchronize_rcu_expedited(void);
-
-static inline void synchronize_sched_expedited(void)
-{
- synchronize_rcu_expedited();
-}
-
void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

void rcu_barrier(void);
-void rcu_barrier_sched(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);

void rcu_idle_enter(void);
void rcu_idle_exit(void);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1e8a4431ebd..b2ebdd01c8a2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2949,19 +2949,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(call_rcu);

-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * This is transitional.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
- call_rcu(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
/*
* Queue an RCU callback for lazy invocation after a grace period.
* This will likely be later named something like "call_rcu_lazy()",
@@ -2975,17 +2962,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * This is transitional.
- */
-void synchronize_sched(void)
-{
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
/**
* get_state_synchronize_rcu - Snapshot current RCU state
*
@@ -3027,29 +3003,6 @@ void cond_synchronize_rcu(unsigned long oldstate)
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

-/**
- * get_state_synchronize_sched - Snapshot current RCU-sched state
- *
- * This is transitional, and only used by rcutorture.
- */
-unsigned long get_state_synchronize_sched(void)
-{
- return get_state_synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
-
-/**
- * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
- * @oldstate: return value from earlier call to get_state_synchronize_sched()
- *
- * This is transitional and only used by rcutorture.
- */
-void cond_synchronize_sched(unsigned long oldstate)
-{
- cond_synchronize_rcu(oldstate);
-}
-EXPORT_SYMBOL_GPL(cond_synchronize_sched);
-
/*
* Check to see if there is any immediate RCU-related work to be done by
* the current CPU, for the specified type of RCU, returning 1 if so and
@@ -3265,17 +3218,6 @@ void rcu_barrier(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier);

-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- *
- * This is transitional.
- */
-void rcu_barrier_sched(void)
-{
- rcu_barrier();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
/*
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
* first CPU in a given leaf rcu_node structure coming online. The caller
--
2.17.1


2018-08-29 22:56:43

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 07/24] rcutorture: Remove the "rcu_bh" and "sched" torture types

Now that the RCU-bh and RCU-sched update-side functions are simple
wrappers around their RCU counterparts, there isn't a whole lot of point
in testing them. This commit therefore removes the "rcu_bh" and "sched"
torture types from rcutorture.
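
For context, rcutorture drives each torture type through a table of
function pointers, so removing a type just drops its table from the
registration array. A reduced stand-alone sketch of that pattern,
with stand-in types and names in place of struct rcu_torture_ops:

#include <stdio.h>

/* Reduced stand-in for struct rcu_torture_ops: a vtable of primitives. */
struct torture_ops {
	const char *name;
	void (*readlock)(void);
	void (*readunlock)(void);
};

static void rcu_rl(void) { puts("  rcu_read_lock()"); }
static void rcu_rul(void) { puts("  rcu_read_unlock()"); }

static struct torture_ops rcu_ops = { "rcu", rcu_rl, rcu_rul };

/* With rcu_bh_ops and sched_ops gone, only distinct implementations
 * remain in the registration array. */
static struct torture_ops *torture_ops[] = { &rcu_ops };

int main(void)
{
	for (unsigned long i = 0;
	     i < sizeof(torture_ops) / sizeof(torture_ops[0]); i++) {
		printf("torturing %s\n", torture_ops[i]->name);
		torture_ops[i]->readlock();
		torture_ops[i]->readunlock();
	}
	return 0;
}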

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcutorture.c | 89 ++---------------------------------------
1 file changed, 3 insertions(+), 86 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 1bc0e37dffa8..a228ad762fba 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -128,7 +128,7 @@ torture_param(int, verbose, 1,

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static int ncbflooders;
@@ -438,47 +438,6 @@ static struct rcu_torture_ops rcu_ops = {
.name = "rcu"
};

-/*
- * Definitions for rcu_bh torture testing.
- */
-
-static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops rcu_bh_ops = {
- .ttype = RCU_BH_FLAVOR,
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .get_gp_seq = rcu_bh_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .deferred_free = rcu_bh_torture_deferred_free,
- .sync = synchronize_rcu_bh,
- .exp_sync = synchronize_rcu_bh_expedited,
- .call = call_rcu_bh,
- .cb_barrier = rcu_barrier_bh,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
- .ext_irq_conflict = RCUTORTURE_RDR_RCU,
- .name = "rcu_bh"
-};
-
/*
* Don't even think about trying any of these in real life!!!
* The names includes "busted", and they really means it!
@@ -666,48 +625,6 @@ static struct rcu_torture_ops busted_srcud_ops = {
.name = "busted_srcud"
};

-/*
- * Definitions for sched torture testing.
- */
-
-static int sched_torture_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_torture_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops sched_ops = {
- .ttype = RCU_SCHED_FLAVOR,
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .get_gp_seq = rcu_sched_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .deferred_free = rcu_sched_torture_deferred_free,
- .sync = synchronize_sched,
- .exp_sync = synchronize_sched_expedited,
- .get_state = get_state_synchronize_sched,
- .cond_sync = cond_synchronize_sched,
- .call = call_rcu_sched,
- .cb_barrier = rcu_barrier_sched,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .extendables = RCUTORTURE_MAX_EXTEND,
- .name = "sched"
-};
-
/*
* Definitions for RCU-tasks torture testing.
*/
@@ -1956,8 +1873,8 @@ rcu_torture_init(void)
int cpu;
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
- &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
- &busted_srcud_ops, &sched_ops, &tasks_ops,
+ &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
+ &busted_srcud_ops, &tasks_ops,
};

if (!torture_init_begin(torture_type, verbose))
--
2.17.1


2018-08-29 22:56:56

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 09/24] rcu: Remove now-unused rcutorture APIs

This commit removes rcu_sched_get_gp_seq(), rcu_bh_get_gp_seq(),
rcu_exp_batches_completed_sched(), rcu_sched_force_quiescent_state(),
and rcu_bh_force_quiescent_state(), which are no longer used because
rcutorture no longer does "rcu_bh" and "rcu_sched" torture types.

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcu.h | 10 ----------
kernel/rcu/tree.c | 47 -----------------------------------------------
2 files changed, 57 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 2bb77fddc11f..aa3dc08af4b3 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -509,29 +509,19 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
-static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
-static inline void rcu_bh_force_quiescent_state(void) { }
-static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
-unsigned long rcu_bh_get_gp_seq(void);
-unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
-unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
-void rcu_sched_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b2ebdd01c8a2..fb888085d304 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -490,25 +490,6 @@ unsigned long rcu_get_gp_seq(void)
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

-/*
- * Return the number of RCU-sched GPs completed thus far for debug & stats.
- */
-unsigned long rcu_sched_get_gp_seq(void)
-{
- return rcu_get_gp_seq();
-}
-EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
-
-/*
- * Return the number of RCU GPs completed thus far for debug & stats.
- * This is a transitional API and will soon be removed.
- */
-unsigned long rcu_bh_get_gp_seq(void)
-{
- return READ_ONCE(rcu_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
-
/*
* Return the number of RCU expedited batches completed thus far for
* debug & stats. Odd numbers mean that a batch is in progress, even
@@ -521,16 +502,6 @@ unsigned long rcu_exp_batches_completed(void)
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

-/*
- * Return the number of RCU-sched expedited batches completed thus far
- * for debug & stats. Similar to rcu_exp_batches_completed().
- */
-unsigned long rcu_exp_batches_completed_sched(void)
-{
- return rcu_state.expedited_sequence;
-}
-EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
-
/*
* Force a quiescent state.
*/
@@ -540,24 +511,6 @@ void rcu_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

-/*
- * Force a quiescent state for RCU BH.
- */
-void rcu_bh_force_quiescent_state(void)
-{
- force_quiescent_state();
-}
-EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-
-/*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
- rcu_force_quiescent_state();
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
/*
* Show the state of the grace-period kthreads.
*/
--
2.17.1


2018-08-29 22:56:56

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 20/24] rcu: Clean up flavor-related definitions and comments in tree_plugin.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tree_plugin.h | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cd276c46bc14..cd4c1b979446 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -38,8 +38,7 @@
#include "../locking/rtmutex_common.h"

/*
- * Control variables for per-CPU and per-rcu_node kthreads. These
- * handle all flavors of RCU.
+ * Control variables for per-CPU and per-rcu_node kthreads.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -826,8 +825,8 @@ static void rcu_flavor_check_callbacks(int user)
*
* Note that this guarantee implies further memory-ordering guarantees.
* On systems with more than one CPU, when synchronize_rcu() returns,
- * each CPU is guaranteed to have executed a full memory barrier since the
- * end of its last RCU-sched read-side critical section whose beginning
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
* preceded the call to synchronize_rcu(). In addition, each CPU having
* an RCU read-side critical section that extends beyond the return from
* synchronize_rcu() is guaranteed to have executed a full memory barrier
@@ -1069,7 +1068,7 @@ void synchronize_rcu(void)
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu() in RCU-sched read-side critical section");
+ "Illegal synchronize_rcu() in RCU read-side critical section");
if (rcu_blocking_is_gp())
return;
if (rcu_gp_is_expedited())
@@ -1341,9 +1340,9 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
}

/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces
+ * the RCU softirq used in configurations of RCU that do not support RCU
+ * priority boosting.
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
@@ -1484,8 +1483,8 @@ static void rcu_prepare_kthreads(int cpu)
* 1 if so. This function is part of the RCU implementation; it is -not-
* an exported member of the RCU API.
*
- * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
- * any flavor of RCU.
+ * Because we not have RCU_FAST_NO_HZ, just check whether or not this
+ * CPU has RCU callbacks queued.
*/
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
@@ -1551,9 +1550,9 @@ static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

/*
- * Try to advance callbacks for all flavors of RCU on the current CPU, but
- * only if it has been awhile since the last time we did so. Afterwards,
- * if there are any callbacks ready for immediate invocation, return true.
+ * Try to advance callbacks on the current CPU, but only if it has been
+ * awhile since the last time we did so. Afterwards, if there are any
+ * callbacks ready for immediate invocation, return true.
*/
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
@@ -1808,7 +1807,7 @@ static void print_cpu_stall_info_end(void)
pr_err("\t");
}

-/* Zero ->ticks_this_gp for all flavors of RCU. */
+/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
rdp->ticks_this_gp = 0;
@@ -1939,7 +1938,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
}

/*
- * Does the specified CPU need an RCU callback for the specified flavor
+ * Does the specified CPU need an RCU callback for this invocation
* of rcu_barrier()?
*/
static bool rcu_nocb_cpu_needs_barrier(int cpu)
@@ -2419,9 +2418,8 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)

/*
* If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are
- * brought online out of order, this can require re-organizing the
- * leader-follower relationships.
+ * rcuo kthread, spawn it. If the CPUs are brought online out of order,
+ * this can require re-organizing the leader-follower relationships.
*/
static void rcu_spawn_one_nocb_kthread(int cpu)
{
@@ -2458,7 +2456,7 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
rdp_spawn->nocb_next_follower = rdp_old_leader;
}

- /* Spawn the kthread for this CPU and RCU flavor. */
+ /* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu);
BUG_ON(IS_ERR(t));
--
2.17.1


2018-08-29 22:56:58

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 11/24] rcu: Clean up flavor-related definitions and comments in rculist.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rculist.h | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4786c2235b98..e91ec9ddcd30 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
--
2.17.1


2018-08-29 22:57:00

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 06/24] rcu: Stop testing RCU-bh and RCU-sched

Now that the RCU-bh and RCU-sched update-side functions are simple
wrappers around their RCU counterparts, there isn't a whole lot of
point in testing them. This commit therefore removes the self-test
capability and removes the corresponding kernel-boot parameters.
It also updates the various rcutorture .boot files to remove the
kernel boot parameters that call for testing RCU-bh and RCU-sched.
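
The declare/run/verify flow behind these parameters reduces to
roughly the following stand-alone sketch, mirroring the
rcu_early_boot_tests()/rcu_verify_early_boot_tests() pairing in the
diff below. The real code posts call_rcu() callbacks and uses
rcu_barrier() to flush them, which is modeled here with direct calls:

#include <stdbool.h>
#include <stdio.h>

static bool rcu_self_test = true;	/* The one remaining knob. */
static int rcu_self_test_counter;	/* Bumped by the test callback. */
static int early_boot_test_counter;	/* Number of callbacks expected. */

static void test_callback(void)
{
	rcu_self_test_counter++;
}

static void rcu_early_boot_tests(void)
{
	if (rcu_self_test)
		test_callback();	/* Really: call_rcu(&head, ...). */
}

static int rcu_verify_early_boot_tests(void)
{
	if (rcu_self_test) {
		early_boot_test_counter++;
		/* Really: rcu_barrier() to flush the posted callback. */
	}
	return rcu_self_test_counter == early_boot_test_counter ? 0 : -1;
}

int main(void)
{
	rcu_early_boot_tests();
	printf("self test %s\n",
	       rcu_verify_early_boot_tests() ? "FAILED" : "passed");
	return 0;
}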

Signed-off-by: Paul E. McKenney <[email protected]>
---
.../admin-guide/kernel-parameters.txt | 6 ---
kernel/rcu/update.c | 38 +------------------
.../rcutorture/configs/rcu/TINY02.boot | 2 -
.../rcutorture/configs/rcu/TREE01.boot | 2 +-
.../rcutorture/configs/rcu/TREE04.boot | 2 +-
.../rcutorture/configs/rcu/TREE05.boot | 2 -
.../rcutorture/configs/rcu/TREE06.boot | 2 -
.../rcutorture/configs/rcu/TREE08.boot | 2 -
8 files changed, 3 insertions(+), 53 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9871e649ffef..aa96e669bcb8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3863,12 +3863,6 @@
rcupdate.rcu_self_test= [KNL]
Run the RCU early boot self tests

- rcupdate.rcu_self_test_bh= [KNL]
- Run the RCU bh early boot self tests
-
- rcupdate.rcu_self_test_sched= [KNL]
- Run the RCU sched early boot self tests
-
rdinit= [KNL]
Format: <full_path>
Run specified binary instead of /init from the ramdisk,
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 9ea87d0aa386..ee366faecea6 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -203,11 +203,7 @@ void rcu_test_sync_prims(void)
if (!IS_ENABLED(CONFIG_PROVE_RCU))
return;
synchronize_rcu();
- synchronize_rcu_bh();
- synchronize_sched();
synchronize_rcu_expedited();
- synchronize_rcu_bh_expedited();
- synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
@@ -870,15 +866,10 @@ static void __init rcu_tasks_bootup_oddness(void)
#ifdef CONFIG_PROVE_RCU

/*
- * Early boot self test parameters, one for each flavor
+ * Early boot self test parameters.
*/
static bool rcu_self_test;
-static bool rcu_self_test_bh;
-static bool rcu_self_test_sched;
-
module_param(rcu_self_test, bool, 0444);
-module_param(rcu_self_test_bh, bool, 0444);
-module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

@@ -895,30 +886,12 @@ static void early_boot_test_call_rcu(void)
call_rcu(&head, test_callback);
}

-static void early_boot_test_call_rcu_bh(void)
-{
- static struct rcu_head head;
-
- call_rcu_bh(&head, test_callback);
-}
-
-static void early_boot_test_call_rcu_sched(void)
-{
- static struct rcu_head head;
-
- call_rcu_sched(&head, test_callback);
-}
-
void rcu_early_boot_tests(void)
{
pr_info("Running RCU self tests\n");

if (rcu_self_test)
early_boot_test_call_rcu();
- if (rcu_self_test_bh)
- early_boot_test_call_rcu_bh();
- if (rcu_self_test_sched)
- early_boot_test_call_rcu_sched();
rcu_test_sync_prims();
}

@@ -931,15 +904,6 @@ static int rcu_verify_early_boot_tests(void)
early_boot_test_counter++;
rcu_barrier();
}
- if (rcu_self_test_bh) {
- early_boot_test_counter++;
- rcu_barrier_bh();
- }
- if (rcu_self_test_sched) {
- early_boot_test_counter++;
- rcu_barrier_sched();
- }
-
if (rcu_self_test_counter != early_boot_test_counter) {
WARN_ON(1);
ret = -1;
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
index 6c1a292a65fb..b39f1553a478 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
@@ -1,3 +1 @@
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_bh=1
-rcutorture.torture_type=rcu_bh
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
index 9f3a4d28e508..ea47da95374b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
@@ -1,4 +1,4 @@
-rcutorture.torture_type=rcu_bh maxcpus=8 nr_cpus=43
+maxcpus=8 nr_cpus=43
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
index e6071bb96c7d..5adc6756792a 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh rcutree.rcu_fanout_leaf=4 nohz_full=1-7
+rcutree.rcu_fanout_leaf=4 nohz_full=1-7
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
index c7fd050dfcd9..779f1aed4606 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
@@ -1,5 +1,3 @@
-rcutorture.torture_type=sched
-rcupdate.rcu_self_test_sched=1
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
index ad18b52a2cad..055f4aa79077 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
@@ -1,6 +1,4 @@
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_bh=1
-rcupdate.rcu_self_test_sched=1
rcutree.rcu_fanout_exact=1
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
index 1bd8efc4141e..22478fd3a865 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
@@ -1,5 +1,3 @@
-rcutorture.torture_type=sched
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_sched=1
rcutree.rcu_fanout_exact=1
rcu_nocbs=0-7
--
2.17.1


2018-08-29 22:57:10

by Paul E. McKenney

Subject: [PATCH tip/core/rcu 24/24] rcu: Inline _rcu_barrier() into its sole remaining caller

Because rcu_barrier() is a one-line wrapper function for _rcu_barrier()
and because nothing else calls _rcu_barrier(), this commit inlines
_rcu_barrier() into rcu_barrier().
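
The piggybacking that the "EarlyExit" and "Inc" tracepoints describe
reduces to roughly the following stand-alone sketch. This is a
simplified model: the real ->barrier_sequence uses the rcu_seq API,
which keeps extra state bits in the counter's low-order bits.

#include <stdio.h>

static unsigned long barrier_sequence;

static unsigned long seq_snap(const unsigned long *sp)
{
	return *sp + 1;		/* First count that covers our work. */
}

static int seq_done(const unsigned long *sp, unsigned long s)
{
	return *sp >= s;	/* Has a later barrier finished it? */
}

static void rcu_barrier(void)
{
	unsigned long s = seq_snap(&barrier_sequence);

	if (seq_done(&barrier_sequence, s)) {
		puts("EarlyExit");	/* Someone else did our work. */
		return;
	}
	/* ... entrain rcu_barrier_callback() on each CPU, then wait ... */
	barrier_sequence++;
	puts("Inc2");			/* Mark the barrier complete. */
}

int main(void)
{
	unsigned long s = seq_snap(&barrier_sequence);

	rcu_barrier();				/* Prints "Inc2". */
	if (seq_done(&barrier_sequence, s))	/* Piggyback check. */
		puts("a caller that snapshotted earlier could EarlyExit");
	return 0;
}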

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/trace/events/rcu.h | 20 ++++++-------
kernel/rcu/tree.c | 58 +++++++++++++++++---------------------
kernel/rcu/tree.h | 4 +--
kernel/rcu/tree_plugin.h | 2 +-
4 files changed, 39 insertions(+), 45 deletions(-)

diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index a8d07feff6a0..175e0bce22bd 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -705,20 +705,20 @@ TRACE_EVENT(rcu_torture_read,
);

/*
- * Tracepoint for _rcu_barrier() execution. The string "s" describes
- * the _rcu_barrier phase:
- * "Begin": _rcu_barrier() started.
- * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
- * "Inc1": _rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
- * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ * Tracepoint for rcu_barrier() execution. The string "s" describes
+ * the rcu_barrier phase:
+ * "Begin": rcu_barrier() started.
+ * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
+ * "Inc1": rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
+ * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
+ * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
* "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5f2a12a65b42..31e94f672d01 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2746,7 +2746,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
/*
* Helper function for call_rcu() and friends. The cpu argument will
* normally be -1, indicating "currently running CPU". It may specify
- * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
+ * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier()
* is expected to specify a CPU.
*/
static void
@@ -2980,27 +2980,27 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
}

/*
- * Helper function for _rcu_barrier() tracing. If tracing is disabled,
+ * Helper function for rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
-static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
+static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
{
trace_rcu_barrier(rcu_state.name, s, cpu,
atomic_read(&rcu_state.barrier_cpu_count), done);
}

/*
- * RCU callback function for _rcu_barrier(). If we are last, wake
- * up the task executing _rcu_barrier().
+ * RCU callback function for rcu_barrier(). If we are last, wake
+ * up the task executing rcu_barrier().
*/
static void rcu_barrier_callback(struct rcu_head *rhp)
{
if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
- _rcu_barrier_trace(TPS("LastCB"), -1,
+ rcu_barrier_trace(TPS("LastCB"), -1,
rcu_state.barrier_sequence);
complete(&rcu_state.barrier_completion);
} else {
- _rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
+ rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
}
}

@@ -3011,33 +3011,40 @@ static void rcu_barrier_func(void *unused)
{
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);

- _rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
+ rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
rdp->barrier_head.func = rcu_barrier_callback;
debug_rcu_head_queue(&rdp->barrier_head);
if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
atomic_inc(&rcu_state.barrier_cpu_count);
} else {
debug_rcu_head_unqueue(&rdp->barrier_head);
- _rcu_barrier_trace(TPS("IRQNQ"), -1,
+ rcu_barrier_trace(TPS("IRQNQ"), -1,
rcu_state.barrier_sequence);
}
}

-/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */
-static void _rcu_barrier(void)
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ *
+ * Note that this primitive does not necessarily wait for an RCU grace period
+ * to complete. For example, if there are no RCU callbacks queued anywhere
+ * in the system, then rcu_barrier() is within its rights to return
+ * immediately, without waiting for anything, much less an RCU grace period.
+ */
+void rcu_barrier(void)
{
int cpu;
struct rcu_data *rdp;
unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);

- _rcu_barrier_trace(TPS("Begin"), -1, s);
+ rcu_barrier_trace(TPS("Begin"), -1, s);

/* Take mutex to serialize concurrent rcu_barrier() requests. */
mutex_lock(&rcu_state.barrier_mutex);

/* Did someone else do our work for us? */
if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
- _rcu_barrier_trace(TPS("EarlyExit"), -1,
+ rcu_barrier_trace(TPS("EarlyExit"), -1,
rcu_state.barrier_sequence);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rcu_state.barrier_mutex);
@@ -3046,7 +3053,7 @@ static void _rcu_barrier(void)

/* Mark the start of the barrier operation. */
rcu_seq_start(&rcu_state.barrier_sequence);
- _rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
+ rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);

/*
* Initialize the count to one rather than to zero in order to
@@ -3069,10 +3076,10 @@ static void _rcu_barrier(void)
rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_is_nocb_cpu(cpu)) {
if (!rcu_nocb_cpu_needs_barrier(cpu)) {
- _rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
+ rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
rcu_state.barrier_sequence);
} else {
- _rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
+ rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
rcu_state.barrier_sequence);
smp_mb__before_atomic();
atomic_inc(&rcu_state.barrier_cpu_count);
@@ -3080,11 +3087,11 @@ static void _rcu_barrier(void)
rcu_barrier_callback, cpu, 0);
}
} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
- _rcu_barrier_trace(TPS("OnlineQ"), cpu,
+ rcu_barrier_trace(TPS("OnlineQ"), cpu,
rcu_state.barrier_sequence);
smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
} else {
- _rcu_barrier_trace(TPS("OnlineNQ"), cpu,
+ rcu_barrier_trace(TPS("OnlineNQ"), cpu,
rcu_state.barrier_sequence);
}
}
@@ -3101,25 +3108,12 @@ static void _rcu_barrier(void)
wait_for_completion(&rcu_state.barrier_completion);

/* Mark the end of the barrier operation. */
- _rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
+ rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
rcu_seq_end(&rcu_state.barrier_sequence);

/* Other rcu_barrier() invocations can now safely proceed. */
mutex_unlock(&rcu_state.barrier_mutex);
}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- *
- * Note that this primitive does not necessarily wait for an RCU grace period
- * to complete. For example, if there are no RCU callbacks queued anywhere
- * in the system, then rcu_barrier() is within its rights to return
- * immediately, without waiting for anything, much less an RCU grace period.
- */
-void rcu_barrier(void)
-{
- _rcu_barrier();
-}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 46452d3d0fad..8cf93ac277ec 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -222,7 +222,7 @@ struct rcu_data {
/* Grace period that needs help */
/* from cond_resched(). */

- /* 5) _rcu_barrier(), OOM callbacks, and expediting. */
+ /* 5) rcu_barrier(), OOM callbacks, and expediting. */
struct rcu_head barrier_head;
int exp_dynticks_snap; /* Double-check need for IPI. */

@@ -328,7 +328,7 @@ struct rcu_state {
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
struct completion barrier_completion; /* Wake at barrier end. */
unsigned long barrier_sequence; /* ++ at start and end of */
- /* _rcu_barrier(). */
+ /* rcu_barrier(). */
/* End of fields guarded by barrier_mutex. */

struct mutex exp_mutex; /* Serialize expedited GP. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7add1c297500..beaaca7a11f4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1993,7 +1993,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* There needs to be a barrier before this function is called,
* but associated with a prior determination that no more
* callbacks would be posted. In the worst case, the first
- * barrier in _rcu_barrier() suffices (but the caller cannot
+ * barrier in rcu_barrier() suffices (but the caller cannot
* necessarily rely on this, not a substitute for the caller
* getting the concurrency design right!). There must also be
 * a barrier between the following load and posting of a callback
--
2.17.1
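
The kernel-doc now attached directly to rcu_barrier() is worth pairing with its
classic use case: a module must not be unloaded while callbacks it posted are
still in flight. A minimal sketch, assuming a hypothetical module with object
type mymod_obj and a hypothetical quiesce step:

	struct mymod_obj {
		struct rcu_head rh;
		/* ... payload ... */
	};

	/* Callback posted via call_rcu(); lives in module text. */
	static void mymod_free_cb(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct mymod_obj, rh));
	}

	static void __exit mymod_exit(void)
	{
		mymod_stop_posting_callbacks(); /* Hypothetical: no new call_rcu(). */
		rcu_barrier();	/* All pending mymod_free_cb() invocations done. */
		/* Safe to unload: no callback still references module text. */
	}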


2018-08-29 22:57:11

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 17/24] rcu: Clean up flavor-related definitions and comments in tiny.c

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tiny.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 30826fb6e438..a77853b73bfe 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -117,9 +117,9 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused

/*
* Wait for a grace period to elapse. But it is illegal to invoke
- * synchronize_sched() from within an RCU read-side critical section.
- * Therefore, any legal call to synchronize_sched() is a quiescent
- * state, and so on a UP system, synchronize_sched() need do nothing.
+ * synchronize_rcu() from within an RCU read-side critical section.
+ * Therefore, any legal call to synchronize_rcu() is a quiescent
+ * state, and so on a UP system, synchronize_rcu() need do nothing.
* (But Lai Jiangshan points out the benefits of doing might_sleep()
* to reduce latency.)
*
@@ -130,12 +130,12 @@ void synchronize_rcu(void)
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched() in RCU read-side critical section");
+ "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/*
- * Post an RCU callback to be invoked after the end of an RCU-sched grace
+ * Post an RCU callback to be invoked after the end of an RCU grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
--
2.17.1
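
The updated comment leans on the rule that synchronize_rcu() is illegal inside
an RCU read-side critical section, which is exactly what the RCU_LOCKDEP_WARN()
in this hunk checks. A hedged sketch of the illegal pattern it catches (gp and
p are hypothetical):

	rcu_read_lock();
	p = rcu_dereference(gp);
	synchronize_rcu();	/* BUG: the grace period cannot end while this
				 * reader runs, so lockdep splats here. */
	rcu_read_unlock();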


2018-08-29 22:57:20

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 01/24] rcu: Inline increment_cpu_stall_ticks() into its sole caller

Consolidation of the RCU flavors into one makes increment_cpu_stall_ticks()
a trivial one-line function with only one caller. This commit therefore
inlines it.

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tree.c | 2 +-
kernel/rcu/tree.h | 1 -
kernel/rcu/tree_plugin.h | 6 ------
3 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 91f5d612502a..35a4346c7cce 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2554,7 +2554,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
void rcu_check_callbacks(int user)
{
trace_rcu_utilization(TPS("Start scheduler-tick"));
- increment_cpu_stall_ticks();
+ raw_cpu_inc(rcu_data.ticks_this_gp);
rcu_flavor_check_callbacks(user);
if (rcu_pending())
invoke_rcu_core();
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8abc15c42d84..46452d3d0fad 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -466,7 +466,6 @@ static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
-static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(int cpu);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 878a1d2cd465..cd276c46bc14 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1815,12 +1815,6 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
}

-/* Increment ->ticks_this_gp for all flavors of RCU. */
-static void increment_cpu_stall_ticks(void)
-{
- raw_cpu_inc(rcu_data.ticks_this_gp);
-}
-
#ifdef CONFIG_RCU_NOCB_CPU

/*
--
2.17.1
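
For readers unfamiliar with the substituted one-liner: raw_cpu_inc() is the
variant of this_cpu_inc() that skips the preemption-debugging checks, which is
presumably acceptable here because rcu_check_callbacks() runs from the
scheduling-clock interrupt:

	raw_cpu_inc(rcu_data.ticks_this_gp);	/* Per-CPU stall-warning tick count. */
	/* Equivalent, but with preemption sanity checks in debug kernels: */
	this_cpu_inc(rcu_data.ticks_this_gp);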


2018-08-29 22:57:21

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 15/24] rcu: Clean up flavor-related definitions and comments in rcutorture.c

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcutorture.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index a228ad762fba..294b3f6b7eb6 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1221,7 +1221,7 @@ static void rcutorture_loop_extend(int *readstate,

WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
if (!((mask - 1) & mask))
- return; /* Current RCU flavor not extendable. */
+ return; /* Current RCU reader not extendable. */
i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
while (i--) {
mask = rcutorture_extend_mask(*readstate, trsp);
@@ -1790,7 +1790,7 @@ rcu_torture_cleanup(void)
cpuhp_remove_state(rcutor_hp);

/*
- * Wait for all RCU callbacks to fire, then do flavor-specific
+ * Wait for all RCU callbacks to fire, then do torture-type-specific
* cleanup operations.
*/
if (cur_ops->cb_barrier != NULL)
--
2.17.1


2018-08-29 22:57:28

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 18/24] rcu: Clean up flavor-related definitions and comments in tree.c

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tree.c | 74 ++++++++++++++++++++---------------------------
1 file changed, 32 insertions(+), 42 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fb888085d304..e8fbb7ee76cc 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -382,12 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
}

/*
- * Register a quiescent state for all RCU flavors. If there is an
+ * Register an urgently needed quiescent state. If there is an
* emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desperate need of a quiescent state, which will normally
- * be none of them). Either way, do a lightweight quiescent state for
- * all RCU flavors.
+ * dyntick-idle quiescent state visible to other CPUs, which will in
+ * some cases serve for expedited as well as normal grace periods.
+ * Either way, register a lightweight quiescent state.
*
* The barrier() calls are redundant in the common case when this is
* called externally, but just in case this is called from within this
@@ -564,7 +563,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
- * Return the root node of the specified rcu_state structure.
+ * Return the root node of the rcu_state structure.
*/
static struct rcu_node *rcu_get_root(void)
{
@@ -948,11 +947,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
* Disable preemption to avoid false positives that could otherwise
* happen due to the current CPU number being sampled, this task being
* preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline. Because there are
- * multiple flavors of RCU, and because this function can be called in the
- * midst of updating the flavors while a given CPU coming online or going
- * offline, it is necessary to check all flavors. If any of the flavors
- * believe that given CPU is online, it is considered to be online.
+ * then determining that its old CPU is now offline.
*
* Disable checking if in an NMI handler because we cannot safely
* report errors from NMI handlers anyway. In addition, it is OK to use
@@ -1562,11 +1557,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
}

/*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread. Don't do a self-awaken, and don't
+ * bother awakening when there is nothing for the grace-period kthread
+ * to do (as in several CPUs raced to awaken, and we lost), and finally
+ * don't try to awaken a kthread that has not yet been created.
*/
static void rcu_gp_kthread_wake(void)
{
@@ -2118,13 +2112,13 @@ static int __noreturn rcu_gp_kthread(void *unused)
}

/*
- * Report a full set of quiescent states to the specified rcu_state data
- * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
- * kthread if another grace period is required. Whether we wake
- * the grace-period kthread or it awakens itself for the next round
- * of quiescent-state forcing, that kthread will clean up after the
- * just-completed grace period. Note that the caller must hold rnp->lock,
- * which is released before return.
+ * Report a full set of quiescent states to the rcu_state data structure.
+ * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
+ * another grace period is required. Whether we wake the grace-period
+ * kthread or it awakens itself for the next round of quiescent-state
+ * forcing, that kthread will clean up after the just-completed grace
+ * period. Note that the caller must hold rnp->lock, which is released
+ * before return.
*/
static void rcu_report_qs_rsp(unsigned long flags)
__releases(rcu_get_root()->lock)
@@ -2211,7 +2205,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
/*
* Record a quiescent state for all tasks that were previously queued
* on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
+ * RCU grace period. The caller must hold the corresponding rnp->lock with
* irqs disabled, and this lock is released upon return, but irqs remain
* disabled.
*/
@@ -2713,11 +2707,11 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
}

/*
- * Schedule RCU callback invocation. If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
- * otherwise wake up the per-CPU kernel kthread. Note that because we
- * are running on the current CPU with softirqs disabled, the
- * rcu_cpu_kthread_task cannot disappear out from under us.
+ * Schedule RCU callback invocation. If the running implementation of RCU
+ * does not support RCU priority boosting, just do a direct call, otherwise
+ * wake up the per-CPU kernel kthread. Note that because we are running
+ * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_data *rdp)
{
@@ -2958,11 +2952,10 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

/*
* Check to see if there is any immediate RCU-related work to be done by
- * the current CPU, for the specified type of RCU, returning 1 if so and
- * zero otherwise. The checks are in order of increasing expense: checks
- * that can be carried out against CPU-local state are performed first.
- * However, we must check for CPU stalls first, else we might not get
- * a chance.
+ * the current CPU, returning 1 if so and zero otherwise. The checks are
+ * in order of increasing expense: checks that can be carried out against
+ * CPU-local state are performed first. However, we must check for CPU
+ * stalls first, else we might not get a chance.
*/
static int rcu_pending(void)
{
@@ -3069,10 +3062,7 @@ static void rcu_barrier_func(void *unused)
}
}

-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
+/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */
static void _rcu_barrier(void)
{
int cpu;
@@ -3392,7 +3382,7 @@ void rcu_report_dead(unsigned int cpu)
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */

- /* QS for any half-done expedited RCU-sched GP. */
+ /* QS for any half-done expedited grace period. */
preempt_disable();
rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
preempt_enable();
@@ -3481,7 +3471,7 @@ static int rcu_pm_notify(struct notifier_block *self,
}

/*
- * Spawn the kthreads that handle each RCU flavor's grace periods.
+ * Spawn the kthreads that handle RCU's grace periods.
*/
static int __init rcu_spawn_gp_kthread(void)
{
@@ -3544,7 +3534,7 @@ void rcu_scheduler_starting(void)
}

/*
- * Helper function for rcu_init() that initializes one rcu_state structure.
+ * Helper function for rcu_init() that initializes the rcu_state structure.
*/
static void __init rcu_init_one(void)
{
@@ -3706,7 +3696,7 @@ static void __init rcu_init_geometry(void)

/*
* Dump out the structure of the rcu_node combining tree associated
- * with the rcu_state structure referenced by rsp.
+ * with the rcu_state structure.
*/
static void __init rcu_dump_rcu_node_tree(void)
{
--
2.17.1
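
The rewritten rcu_pending() comment describes a cheapest-first ordering with
the stall check up front. A rough skeleton of that shape, hedged in that the
helper calls here are illustrative rather than an exact copy of tree.c:

	static int rcu_pending(void)
	{
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

		/* Stall check first, else a wedged CPU may never report one. */
		check_cpu_stall(rdp);

		/* CPU-local checks are cheapest, so they come next. */
		if (rcu_segcblist_ready_cbs(&rdp->cblist))
			return 1;

		/* More expensive checks against shared state come last. */
		if (rcu_gp_in_progress() && rdp->core_needs_qs)
			return 1;
		return 0;
	}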


2018-08-29 22:57:33

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 05/24] rcutorture: Add RCU-bh and RCU-sched support for extended readers

Since there is now a single consolidated RCU flavor, rcutorture
needs to test extending of RCU readers via rcu_read_lock_bh() and
rcu_read_lock_sched(). This commit adds this support, with added checks
(just like for local_bh_enable()) to ensure that rcu_read_unlock_bh()
will not be invoked while interrupts are disabled.

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcutorture.c | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index c55d1483886e..1bc0e37dffa8 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -66,13 +66,16 @@ MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <josh@jos
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
-#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
-#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
-#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
-#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
-#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
-#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
- RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
+#define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
+#define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
+#define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND \
+ (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
+ RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
/* Must be power of two minus one. */

@@ -1217,6 +1220,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
local_irq_disable();
if (statesnew & RCUTORTURE_RDR_PREEMPT)
preempt_disable();
+ if (statesnew & RCUTORTURE_RDR_RBH)
+ rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_SCHED)
+ rcu_read_lock_sched();
if (statesnew & RCUTORTURE_RDR_RCU)
idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

@@ -1227,6 +1234,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
local_bh_enable();
if (statesold & RCUTORTURE_RDR_PREEMPT)
preempt_enable();
+ if (statesold & RCUTORTURE_RDR_RBH)
+ rcu_read_unlock_bh();
+ if (statesold & RCUTORTURE_RDR_SCHED)
+ rcu_read_unlock_sched();
if (statesold & RCUTORTURE_RDR_RCU)
cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

@@ -1269,10 +1280,11 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
mask = mask & randmask2;
else
mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+ /* Can't enable bh w/irq disabled. */
if ((mask & RCUTORTURE_RDR_IRQ) &&
- !(mask & RCUTORTURE_RDR_BH) &&
- (oldmask & RCUTORTURE_RDR_BH))
- mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+ ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
+ (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
+ mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
if ((mask & RCUTORTURE_RDR_IRQ) &&
!(mask & cur_ops->ext_irq_conflict) &&
(oldmask & cur_ops->ext_irq_conflict))
--
2.17.1
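
With the new bits, a single readstate word describes a whole stack of nested
reader types. As a hedged illustration, one nesting that a readstate of
(RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_RCU) can stand
for is:

	preempt_disable();	/* RCUTORTURE_RDR_PREEMPT */
	rcu_read_lock_bh();	/* RCUTORTURE_RDR_RBH */
	rcu_read_lock();	/* RCUTORTURE_RDR_RCU; cur_ops->readlock() when
				 * torturing vanilla RCU. */
	/* ... torture-test reads ... */
	rcu_read_unlock();
	rcu_read_unlock_bh();
	preempt_enable();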


2018-08-29 22:57:37

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 19/24] rcu: Clean up flavor-related definitions and comments in tree_exp.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/tree_exp.h | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 060bdb45cd95..78553a8fa3c6 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -601,8 +601,8 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
}

/*
- * Given an rcu_state pointer and a smp_call_function() handler, kick
- * off the specified flavor of expedited grace period.
+ * Given a smp_call_function() handler, kick off the specified
+ * implementation of expedited grace period.
*/
static void _synchronize_rcu_expedited(smp_call_func_t func)
{
@@ -721,7 +721,7 @@ static void sync_rcu_exp_handler(void *unused)
resched_cpu(rdp->cpu);
}

-/* PREEMPT=y, so no RCU-sched to clean up after. */
+/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}
@@ -798,13 +798,13 @@ static void sync_sched_exp_online_cleanup(int cpu)
}

/*
- * Because a context switch is a grace period for RCU-sched, any blocking
- * grace-period wait automatically implies a grace period if there
- * is only one CPU online at any point time during execution of either
- * synchronize_sched() or synchronize_rcu_bh(). It is OK to occasionally
- * incorrectly indicate that there are multiple CPUs online when there
- * was in fact only one the whole time, as this just adds some overhead:
- * RCU still operates correctly.
+ * Because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point in time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
*/
static int rcu_blocking_is_gp(void)
{
@@ -823,7 +823,7 @@ void synchronize_rcu_expedited(void)
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched_expedited() in RCU read-side critical section");
+ "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

/* If only one CPU, this is automatically a grace period. */
if (rcu_blocking_is_gp())
--
2.17.1
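
The rewritten comment explains why any blocking wait is itself a grace period
when at most one CPU is online. A sketch of the check it documents, close to
but not guaranteed to match the tree.c implementation:

	static int rcu_blocking_is_gp(void)
	{
		int ret;

		might_sleep();	/* Also catches illegal use inside a reader. */
		preempt_disable();
		ret = num_online_cpus() <= 1;
		preempt_enable();
		return ret;
	}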


2018-08-29 22:57:47

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 08/24] rcuperf: Remove the "rcu_bh" and "sched" torture types

Now that the RCU-bh and RCU-sched update-side functions are simple
wrappers around their RCU counterparts, there isn't a whole lot of point
in testing them. This commit therefore removes the "rcu_bh" and "sched"
torture types from rcuperf.

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcuperf.c | 65 ++------------------------------------------
1 file changed, 2 insertions(+), 63 deletions(-)

diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 34244523550e..8de53f3dc5b0 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -189,36 +189,6 @@ static struct rcu_perf_ops rcu_ops = {
.name = "rcu"
};

-/*
- * Definitions for rcu_bh perf testing.
- */
-
-static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static struct rcu_perf_ops rcu_bh_ops = {
- .ptype = RCU_BH_FLAVOR,
- .init = rcu_sync_perf_init,
- .readlock = rcu_bh_perf_read_lock,
- .readunlock = rcu_bh_perf_read_unlock,
- .get_gp_seq = rcu_bh_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .exp_completed = rcu_exp_batches_completed_sched,
- .async = call_rcu_bh,
- .gp_barrier = rcu_barrier_bh,
- .sync = synchronize_rcu_bh,
- .exp_sync = synchronize_rcu_bh_expedited,
- .name = "rcu_bh"
-};
-
/*
* Definitions for srcu perf testing.
*/
@@ -305,36 +275,6 @@ static struct rcu_perf_ops srcud_ops = {
.name = "srcud"
};

-/*
- * Definitions for sched perf testing.
- */
-
-static int sched_perf_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_perf_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static struct rcu_perf_ops sched_ops = {
- .ptype = RCU_SCHED_FLAVOR,
- .init = rcu_sync_perf_init,
- .readlock = sched_perf_read_lock,
- .readunlock = sched_perf_read_unlock,
- .get_gp_seq = rcu_sched_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .exp_completed = rcu_exp_batches_completed_sched,
- .async = call_rcu_sched,
- .gp_barrier = rcu_barrier_sched,
- .sync = synchronize_sched,
- .exp_sync = synchronize_sched_expedited,
- .name = "sched"
-};
-
/*
* Definitions for RCU-tasks perf testing.
*/
@@ -611,7 +551,7 @@ rcu_perf_cleanup(void)
kfree(writer_n_durations);
}

- /* Do flavor-specific cleanup operations. */
+ /* Do torture-type-specific cleanup operations. */
if (cur_ops->cleanup != NULL)
cur_ops->cleanup();

@@ -661,8 +601,7 @@ rcu_perf_init(void)
long i;
int firsterr = 0;
static struct rcu_perf_ops *perf_ops[] = {
- &rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
- &tasks_ops,
+ &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
};

if (!torture_init_begin(perf_type, verbose))
--
2.17.1
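
With the "rcu_bh" and "sched" entries gone, the perf_type module parameter
selects among the remaining ops tables, for example (parameter values as
accepted by rcuperf.c; other options omitted):

	modprobe rcuperf perf_type=rcu
	modprobe rcuperf perf_type=srcu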


2018-08-29 22:57:54

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 22/24] rcu: Remove !PREEMPT code from rcu_note_voluntary_context_switch()

Because RCU-tasks exists only in PREEMPT kernels and because RCU-sched
no longer exists in PREEMPT kernels, it is no longer necessary for the
rcu_note_voluntary_context_switch() macro to do anything for !PREEMPT
kernels. This commit therefore removes !PREEMPT-related code from
this macro, namely, the rcu_all_qs() invocation.

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcupdate.h | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d6d543b60a9f..e4f821165d0b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -150,18 +150,14 @@ static inline void rcu_init_nohz(void) { }
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
-#define rcu_note_voluntary_context_switch(t) \
- do { \
- rcu_all_qs(); \
- rcu_tasks_qs(t); \
- } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
#define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
--
2.17.1
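
Pulling the two hunks together, the macro's definitions after this patch
collapse to the following (a recap of the diff above, not new behavior):

	#ifdef CONFIG_TASKS_RCU
	#define rcu_note_voluntary_context_switch(t)	rcu_tasks_qs(t)
	#else
	#define rcu_note_voluntary_context_switch(t)	do { } while (0)
	#endif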


2018-08-29 22:57:54

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 12/24] rcu: Clean up flavor-related definitions and comments in rcupdate_wait.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcupdate_wait.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index bc104699560e..8a16c3eb3dd0 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -33,17 +33,17 @@ do { \

/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
+ * @...: List of call_rcu() functions for different grace periods to wait on
*
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_sched) would wait
- * on concurrent RCU and RCU-sched grace periods. Waiting on a give SRCU
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
* domain requires you to write a wrapper function for that SRCU domain's
* call_srcu() function, supplying the corresponding srcu_struct.
*
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-sched, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU,
+ * given that anywhere synchronize_rcu_mult() can be called is automatically
+ * a grace period.
*/
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
--
2.17.1
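
The kernel-doc above says that waiting on an SRCU domain requires a wrapper
supplying the corresponding srcu_struct. A minimal sketch, with my_srcu and
call_my_srcu as hypothetical names:

	DEFINE_STATIC_SRCU(my_srcu);

	static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
	{
		call_srcu(&my_srcu, head, func);
	}

	/* Wait concurrently for RCU, RCU-tasks, and my_srcu grace periods. */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks, call_my_srcu);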


2018-08-29 22:58:03

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 14/24] rcu: Clean up flavor-related definitions and comments in rcu.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/rcu.h | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index aa3dc08af4b3..5dec94509a7e 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -176,8 +176,9 @@ static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)

/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
+ * by call_rcu() and rcu callback execution, and are therefore not part
+ * of the RCU API. These are in rcupdate.h because they are used by all
+ * RCU implementations.
*/

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
@@ -328,7 +329,7 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
}
}

-/* Returns first leaf rcu_node of the specified RCU flavor. */
+/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
@@ -339,7 +340,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)

/*
* Do a full breadth-first scan of the {s,}rcu_node structures for the
- * specified rcu_state structure.
+ * specified state structure (for SRCU) or the only rcu_state structure
+ * (for RCU).
*/
#define srcu_for_each_node_breadth_first(sp, rnp) \
for ((rnp) = &(sp)->node[0]; \
@@ -348,10 +350,10 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
- * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
- * structure. Note that if there is a singleton rcu_node tree with but
- * one rcu_node structure, this loop -will- visit the rcu_node structure.
- * It is still a leaf node, even if it is also the root node.
+ * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
+ * Note that if there is a singleton rcu_node tree with but one rcu_node
+ * structure, this loop -will- visit the rcu_node structure. It is still
+ * a leaf node, even if it is also the root node.
*/
#define rcu_for_each_leaf_node(rnp) \
for ((rnp) = rcu_first_leaf_node(); \
--
2.17.1
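
For reference, a typical use of the leaf-scan macro whose comment is updated
above (the loop body is illustrative):

	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		/* Each leaf rcu_node covers a contiguous span of CPUs. */
		pr_info("leaf rcu_node spans CPUs %d-%d\n", rnp->grplo, rnp->grphi);
	}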


2018-08-29 22:58:09

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 13/24] rcu: Clean up flavor-related definitions and comments in Kconfig

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/Kconfig | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index a0b7f0103ca9..939a2056c87a 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -196,7 +196,7 @@ config RCU_BOOST
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
This option also prevents heavy loads from blocking RCU
- callback invocation for all flavors of RCU.
+ callback invocation.

Say Y here if you are working with real-time apps or heavy loads
Say N here if you are unsure.
@@ -225,15 +225,15 @@ config RCU_NOCB_CPU
callback invocation to energy-efficient CPUs in battery-powered
asymmetric multiprocessors.

- This option offloads callback invocation from the set of
- CPUs specified at boot time by the rcu_nocbs parameter.
- For each such CPU, a kthread ("rcuox/N") will be created to
- invoke callbacks, where the "N" is the CPU being offloaded,
- and where the "p" for RCU-preempt and "s" for RCU-sched.
- Nothing prevents this kthread from running on the specified
- CPUs, but (1) the kthreads may be preempted between each
- callback, and (2) affinity or cgroups can be used to force
- the kthreads to run on whatever set of CPUs is desired.
+ This option offloads callback invocation from the set of CPUs
+ specified at boot time by the rcu_nocbs parameter. For each
+ such CPU, a kthread ("rcuox/N") will be created to invoke
+ callbacks, where the "N" is the CPU being offloaded, and where
+ the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
+ (!PREEMPT kernels). Nothing prevents this kthread from running
+ on the specified CPUs, but (1) the kthreads may be preempted
+ between each callback, and (2) affinity or cgroups can be used
+ to force the kthreads to run on whatever set of CPUs is desired.

Say Y here if you want to help to debug reduced OS jitter.
Say N here if you are unsure.
--
2.17.1
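
As a usage note for the help text above, offloading is requested with a CPU
list on the kernel command line, for example (the CPU range is illustrative):

	rcu_nocbs=1-7

On a PREEMPT kernel this creates kthreads rcuop/1 through rcuop/7, and on a
!PREEMPT kernel rcuos/1 through rcuos/7, per the naming described in the help
text.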


2018-08-29 22:58:14

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 16/24] rcu: Clean up flavor-related definitions and comments in srcutree.h

Signed-off-by: Paul E. McKenney <[email protected]>
---
kernel/rcu/srcutree.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 2042080cd38b..7f266b0f9832 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -980,7 +980,7 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* There are memory-ordering constraints implied by synchronize_srcu().
* On systems with more than one CPU, when synchronize_srcu() returns,
* each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU-sched read-side critical section
+ * the end of its last corresponding SRCU read-side critical section
* whose beginning preceded the call to synchronize_srcu(). In addition,
* each CPU having an SRCU read-side critical section that extends beyond
* the return from synchronize_srcu() is guaranteed to have executed a
--
2.17.1
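
A hedged sketch of the ordering guarantee in the updated comment, using a
hypothetical srcu_struct ss and two shared variables:

	DEFINE_STATIC_SRCU(ss);
	int x, y;
	int idx, r1, r2;

	/* Updater */
	WRITE_ONCE(y, 1);
	synchronize_srcu(&ss);
	WRITE_ONCE(x, 1);

	/* Reader */
	idx = srcu_read_lock(&ss);
	r1 = READ_ONCE(x);
	r2 = READ_ONCE(y);
	srcu_read_unlock(&ss, idx);

If the reader sees r1 == 1, the implied full memory barriers guarantee that it
also sees r2 == 1: the write to y is ordered before the grace period, which in
turn is ordered before the write to x.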


2018-08-29 22:58:56

by Paul E. McKenney

[permalink] [raw]
Subject: [PATCH tip/core/rcu 03/24] rcu: Consolidate RCU-bh update-side function definitions

This commit saves a few lines by consolidating the RCU-bh function
definitions at the end of include/linux/rcupdate.h. This consolidation
also makes it easier to remove them all when the time comes.

Signed-off-by: Paul E. McKenney <[email protected]>
---
include/linux/rcupdate.h | 27 ++++++++++++++++++++++-----
include/linux/rcutiny.h | 15 ---------------
include/linux/rcutree.h | 17 -----------------
kernel/rcu/tree.c | 9 ---------
4 files changed, 22 insertions(+), 46 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1207c6c9bd8b..e530f5739033 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,11 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);

-static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
- call_rcu(head, func);
-}
-
#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
@@ -875,4 +870,26 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */


+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+ rcu_barrier();
+}
+
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e66fb8bc2127..df82bada9b19 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -68,21 +68,6 @@ static inline void rcu_barrier_sched(void)
rcu_barrier(); /* Only one CPU, so only one list of callbacks! */
}

-static inline void rcu_barrier_bh(void)
-{
- rcu_barrier();
-}
-
-static inline void synchronize_rcu_bh(void)
-{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched();
-}
-
static inline void synchronize_rcu_expedited(void)
{
synchronize_sched();
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 6d30a0809300..94820156aa62 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,11 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false);
}

-static inline void synchronize_rcu_bh(void)
-{
- synchronize_rcu();
-}
-
void synchronize_rcu_expedited(void);

static inline void synchronize_sched_expedited(void)
@@ -59,19 +54,7 @@ static inline void synchronize_sched_expedited(void)

void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * This is a transitional API and will soon be removed, with all
- * callers converted to synchronize_rcu_expedited().
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_rcu_expedited();
-}
-
void rcu_barrier(void);
-void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bfb6b15bc27c..f1e8a4431ebd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3251,15 +3251,6 @@ static void _rcu_barrier(void)
mutex_unlock(&rcu_state.barrier_mutex);
}

-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
- _rcu_barrier();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
/**
* rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
*
--
2.17.1
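
Until remaining callers are converted, these transitional wrappers keep
existing code compiling and behaving unchanged; for example, a call site such
as:

	call_rcu_bh(&p->rh, myfunc);	/* Now identical to call_rcu(&p->rh, myfunc). */

(p and myfunc being whatever the caller already uses.)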