From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, jiangshanlai@gmail.com, dipankar@in.ibm.com,
	akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
	josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
	rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
	dvhart@linux.intel.com, fweisbec@gmail.com, oleg@redhat.com,
	bobby.prani@gmail.com, "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 11/19] rcu: Use funnel locking for synchronize_rcu_expedited()'s polling loop
Date: Fri, 17 Jul 2015 16:29:16 -0700
Message-Id: <1437175764-24096-11-git-send-email-paulmck@linux.vnet.ibm.com>
In-Reply-To: <1437175764-24096-1-git-send-email-paulmck@linux.vnet.ibm.com>
References: <20150717232901.GA22511@linux.vnet.ibm.com>
	<1437175764-24096-1-git-send-email-paulmck@linux.vnet.ibm.com>

From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>

This commit gets rid of synchronize_rcu_expedited()'s mutex_trylock()
polling loop in favor of the funnel-locking scheme that was abstracted
from synchronize_sched_expedited().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 15 ++++++++-------
 kernel/rcu/tree_plugin.h | 36 ++++++++++--------------------------
 2 files changed, 18 insertions(+), 33 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 094ed8ff82b4..338ea61929bd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3309,9 +3309,9 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 	return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
-/* Common code for synchronize_sched_expedited() work-done checking. */
-static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
-			      atomic_long_t *stat, unsigned long s)
+/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
+			       atomic_long_t *stat, unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
 		if (rnp)
@@ -3319,7 +3319,6 @@ static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(stat);
-		put_online_cpus();
 		return true;
 	}
 	return false;
@@ -3345,14 +3344,14 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	 */
 	rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
 	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-		if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
+		if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone1, s))
 			return NULL;
 		mutex_lock(&rnp0->exp_funnel_mutex);
 		if (rnp1)
 			mutex_unlock(&rnp1->exp_funnel_mutex);
 		rnp1 = rnp0;
 	}
-	if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone2, s))
+	if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone2, s))
 		return NULL;
 	return rnp1;
 }
@@ -3402,8 +3401,10 @@ void synchronize_sched_expedited(void)
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
 	rnp = exp_funnel_lock(rsp, s);
-	if (rnp == NULL)
+	if (rnp == NULL) {
+		put_online_cpus();
 		return;  /* Someone else did our work for us. */
+	}
 
 	rcu_exp_gp_seq_start(rsp);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 759883f51de7..f0d71449ec0c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -536,7 +536,6 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
@@ -556,7 +555,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
  * for the current expedited grace period.  Works only for preemptible
  * RCU -- other RCU implementation use other means.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
@@ -572,7 +571,7 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			       bool wake)
@@ -611,7 +610,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
  * that work is needed here.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void
 sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -654,7 +653,7 @@ sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
  * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
  * enabling rcu_read_unlock_special() to do the bit-clearing.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void
 sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -702,29 +701,16 @@ sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_node *rnp;
+	struct rcu_node *rnp_unlock;
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
-	int trycount = 0;
 
 	s = rcu_exp_gp_seq_snap(rsp);
 
-	/*
-	 * Acquire lock, falling back to synchronize_rcu() if too many
-	 * lock-acquisition failures.  Of course, if someone does the
-	 * expedited grace period for us, just leave.
-	 */
-	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (rcu_exp_gp_seq_done(rsp, s))
-			goto mb_ret; /* Others did our work for us. */
-		if (trycount++ < 10) {
-			udelay(trycount * num_online_cpus());
-		} else {
-			wait_rcu_gp(call_rcu);
-			return;
-		}
-	}
-	if (rcu_exp_gp_seq_done(rsp, s))
-		goto unlock_mb_ret; /* Others did our work for us. */
+	rnp_unlock = exp_funnel_lock(rsp, s);
+	if (rnp_unlock == NULL)
+		return;  /* Someone else did our work for us. */
+
 	rcu_exp_gp_seq_start(rsp);
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
@@ -748,9 +734,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Clean up and exit. */
 	rcu_exp_gp_seq_end(rsp);
-unlock_mb_ret:
-	mutex_unlock(&sync_rcu_preempt_exp_mutex);
-mb_ret:
+	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
 	smp_mb(); /* ensure subsequent action seen after grace period. */
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-- 
1.8.1.5
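
[Editorial aside, not part of the patch above: the following user-space sketch
models the funnel-locking idea that exp_funnel_lock() implements -- each tree
level has its own mutex, a requester walks from its leaf toward the root while
holding at most one mutex at a time, and it bails out (dropping whatever it
holds) as soon as it observes that a concurrent requester has already driven
the grace period it needs. All names here (node, funnel_lock, gp_snap,
gp_done) are invented for illustration, and the sequence-counter handling is a
deliberate simplification of the kernel's rcu_seq machinery, ignoring wrap
and memory-ordering details.]

/* Illustrative sketch only; build with: cc -pthread funnel_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *parent;		/* NULL at the root */
	pthread_mutex_t funnel_lock;	/* per-node funnel mutex */
};

static atomic_ulong gp_seq;		/* even: idle, odd: GP in progress */

/* Snapshot the sequence value a caller must wait for (simplified). */
static unsigned long gp_snap(void)
{
	return (atomic_load(&gp_seq) + 3UL) & ~1UL;
}

/* Has a full grace period elapsed since snapshot s was taken? */
static bool gp_done(unsigned long s)
{
	return atomic_load(&gp_seq) >= s;
}

/*
 * Walk from a (non-NULL) leaf toward the root, holding at most one
 * funnel lock at a time.  Return the root (still locked) if the caller
 * must drive the grace period itself, or NULL if some other caller
 * already did the needed work, in which case no lock is held.
 */
static struct node *funnel_lock(struct node *leaf, unsigned long s)
{
	struct node *held = NULL;
	struct node *np;

	for (np = leaf; np != NULL; np = np->parent) {
		if (gp_done(s)) {
			if (held)
				pthread_mutex_unlock(&held->funnel_lock);
			return NULL;	/* Someone else did our work. */
		}
		pthread_mutex_lock(&np->funnel_lock);
		if (held)
			pthread_mutex_unlock(&held->funnel_lock);
		held = np;
	}
	if (gp_done(s)) {
		pthread_mutex_unlock(&held->funnel_lock);
		return NULL;		/* Work finished while we climbed. */
	}
	return held;			/* Root, still locked. */
}

int main(void)
{
	struct node root = { .parent = NULL,
			     .funnel_lock = PTHREAD_MUTEX_INITIALIZER };
	struct node leaf = { .parent = &root,
			     .funnel_lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned long s = gp_snap();
	struct node *np = funnel_lock(&leaf, s);

	if (np == NULL) {
		printf("work already done for us\n");
		return 0;
	}
	atomic_fetch_add(&gp_seq, 1);	/* start "grace period" */
	/* ... wait for readers here ... */
	atomic_fetch_add(&gp_seq, 1);	/* end "grace period" */
	pthread_mutex_unlock(&np->funnel_lock);
	printf("drove the grace period ourselves\n");
	return 0;
}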