2020-05-05 14:17:11

by Thomas Gleixner

Subject: [patch V4 part 1 36/36] rcu: Make RCU IRQ enter/exit functions rely on in_nmi()

From: Paul E. McKenney <[email protected]>

The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
"irq" parameter that indicates whether these functions are invoked from
an irq handler (irq==true) or an NMI handler (irq==false). However,
recent changes have applied notrace to a few critical functions such
that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely
on in_nmi(). Note that in_nmi() works no differently than before,
but rather that tracing is now prohibited in code regions where in_nmi()
would incorrectly report NMI state.

Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
respectively.

Signed-off-by: Paul E. McKenney <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
---
kernel/rcu/tree.c | 47 +++++++++++++++--------------------------------
1 file changed, 15 insertions(+), 32 deletions(-)

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -627,16 +627,18 @@ noinstr void rcu_user_enter(void)
}
#endif /* CONFIG_NO_HZ_FULL */

-/*
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
* If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
- * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-static __always_inline void rcu_nmi_exit_common(bool irq)
+noinstr void rcu_nmi_exit(void)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

@@ -667,7 +669,7 @@ static __always_inline void rcu_nmi_exit
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

- if (irq)
+ if (!in_nmi())
rcu_prepare_for_idle();
instr_end();

@@ -675,22 +677,11 @@ static __always_inline void rcu_nmi_exit
rcu_dynticks_eqs_enter();
// ... but is no longer watching here.

- if (irq)
+ if (!in_nmi())
rcu_dynticks_task_enter();
}

/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
- *
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void noinstr rcu_nmi_exit(void)
-{
- rcu_nmi_exit_common(false);
-}
-
-/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
@@ -712,7 +703,7 @@ void noinstr rcu_nmi_exit(void)
void noinstr rcu_irq_exit(void)
{
lockdep_assert_irqs_disabled();
- rcu_nmi_exit_common(true);
+ rcu_nmi_exit();
}

/*
@@ -801,7 +792,7 @@ void noinstr rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */

/**
- * rcu_nmi_enter_common - inform RCU of entry to NMI context
+ * rcu_nmi_enter - inform RCU of entry to NMI context
* @irq: Is this call from rcu_irq_enter?
*
* If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
@@ -810,10 +801,10 @@ void noinstr rcu_user_exit(void)
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
*
- * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-static __always_inline void rcu_nmi_enter_common(bool irq)
+noinstr void rcu_nmi_enter(void)
{
long incby = 2;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -831,18 +822,18 @@ static __always_inline void rcu_nmi_ente
*/
if (rcu_dynticks_curr_cpu_in_eqs()) {

- if (irq)
+ if (!in_nmi())
rcu_dynticks_task_exit();

// RCU is not watching here ...
rcu_dynticks_eqs_exit();
// ... but is watching here.

- if (irq)
+ if (!in_nmi())
rcu_cleanup_after_idle();

incby = 1;
- } else if (irq) {
+ } else if (!in_nmi()) {
instr_begin();
if (tick_nohz_full_cpu(rdp->cpu) &&
rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
@@ -877,14 +868,6 @@ static __always_inline void rcu_nmi_ente
}

/**
- * rcu_nmi_enter - inform RCU of entry to NMI context
- */
-noinstr void rcu_nmi_enter(void)
-{
- rcu_nmi_enter_common(false);
-}
-
-/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
* Enter an interrupt handler, which might possibly result in exiting
@@ -909,7 +892,7 @@ noinstr void rcu_nmi_enter(void)
noinstr void rcu_irq_enter(void)
{
lockdep_assert_irqs_disabled();
- rcu_nmi_enter_common(true);
+ rcu_nmi_enter();
}

/*


2020-05-05 18:15:37

by Paul E. McKenney

Subject: Re: [patch V4 part 1 36/36] rcu: Make RCU IRQ enter/exit functions rely on in_nmi()

On Tue, May 05, 2020 at 03:16:38PM +0200, Thomas Gleixner wrote:
> From: Paul E. McKenney <[email protected]>
>
> The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
> "irq" parameter that indicates whether these functions are invoked from
> an irq handler (irq==true) or an NMI handler (irq==false). However,
> recent changes have applied notrace to a few critical functions such
> that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely
> on in_nmi(). Note that in_nmi() works no differently than before,
> but rather that tracing is now prohibited in code regions where in_nmi()
> would incorrectly report NMI state.
>
> Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
> rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
> respectively.

Not a bad job of ghostwriting, actually. I had forgotten about this
entirely, but did find my February 13th email containing this patch. ;-)

But why not make the commit log official?

------------------------------------------------------------------------

The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
"irq" parameter that indicates whether these functions have been invoked
from an irq handler (irq==true) or an NMI handler (irq==false). However,
recent changes have applied notrace to a number of critical functions,
thus allowing rcu_nmi_enter_common() and rcu_nmi_exit_common() to rely
on in_nmi(). Note that in_nmi() works no differently than before.
Instead, tracing is now prohibited in code regions where in_nmi() would
previously have incorrectly reported NMI state.

This commit therefore removes the "irq" parameter and inlines
rcu_nmi_enter_common() and rcu_nmi_exit_common() into rcu_nmi_enter()
and rcu_nmi_exit(), respectively.

------------------------------------------------------------------------

With that commit log, looks good to me!

Thanx, Paul

> Signed-off-by: Paul E. McKenney <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> Signed-off-by: Thomas Gleixner <[email protected]>
> ---
> kernel/rcu/tree.c | 47 +++++++++++++++--------------------------------
> 1 file changed, 15 insertions(+), 32 deletions(-)
>
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -627,16 +627,18 @@ noinstr void rcu_user_enter(void)
> }
> #endif /* CONFIG_NO_HZ_FULL */
>
> -/*
> +/**
> + * rcu_nmi_exit - inform RCU of exit from NMI context
> + *
> * If we are returning from the outermost NMI handler that interrupted an
> * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
> * to let the RCU grace-period handling know that the CPU is back to
> * being RCU-idle.
> *
> - * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_exit(), be sure to test
> * with CONFIG_RCU_EQS_DEBUG=y.
> */
> -static __always_inline void rcu_nmi_exit_common(bool irq)
> +noinstr void rcu_nmi_exit(void)
> {
> struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
>
> @@ -667,7 +669,7 @@ static __always_inline void rcu_nmi_exit
> trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
> WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
>
> - if (irq)
> + if (!in_nmi())
> rcu_prepare_for_idle();
> instr_end();
>
> @@ -675,22 +677,11 @@ static __always_inline void rcu_nmi_exit
> rcu_dynticks_eqs_enter();
> // ... but is no longer watching here.
>
> - if (irq)
> + if (!in_nmi())
> rcu_dynticks_task_enter();
> }
>
> /**
> - * rcu_nmi_exit - inform RCU of exit from NMI context
> - *
> - * If you add or remove a call to rcu_nmi_exit(), be sure to test
> - * with CONFIG_RCU_EQS_DEBUG=y.
> - */
> -void noinstr rcu_nmi_exit(void)
> -{
> - rcu_nmi_exit_common(false);
> -}
> -
> -/**
> * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
> *
> * Exit from an interrupt handler, which might possibly result in entering
> @@ -712,7 +703,7 @@ void noinstr rcu_nmi_exit(void)
> void noinstr rcu_irq_exit(void)
> {
> lockdep_assert_irqs_disabled();
> - rcu_nmi_exit_common(true);
> + rcu_nmi_exit();
> }
>
> /*
> @@ -801,7 +792,7 @@ void noinstr rcu_user_exit(void)
> #endif /* CONFIG_NO_HZ_FULL */
>
> /**
> - * rcu_nmi_enter_common - inform RCU of entry to NMI context
> + * rcu_nmi_enter - inform RCU of entry to NMI context
> * @irq: Is this call from rcu_irq_enter?
> *
> * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
> @@ -810,10 +801,10 @@ void noinstr rcu_user_exit(void)
> * long as the nesting level does not overflow an int. (You will probably
> * run out of stack space first.)
> *
> - * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_enter(), be sure to test
> * with CONFIG_RCU_EQS_DEBUG=y.
> */
> -static __always_inline void rcu_nmi_enter_common(bool irq)
> +noinstr void rcu_nmi_enter(void)
> {
> long incby = 2;
> struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
> @@ -831,18 +822,18 @@ static __always_inline void rcu_nmi_ente
> */
> if (rcu_dynticks_curr_cpu_in_eqs()) {
>
> - if (irq)
> + if (!in_nmi())
> rcu_dynticks_task_exit();
>
> // RCU is not watching here ...
> rcu_dynticks_eqs_exit();
> // ... but is watching here.
>
> - if (irq)
> + if (!in_nmi())
> rcu_cleanup_after_idle();
>
> incby = 1;
> - } else if (irq) {
> + } else if (!in_nmi()) {
> instr_begin();
> if (tick_nohz_full_cpu(rdp->cpu) &&
> rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
> @@ -877,14 +868,6 @@ static __always_inline void rcu_nmi_ente
> }
>
> /**
> - * rcu_nmi_enter - inform RCU of entry to NMI context
> - */
> -noinstr void rcu_nmi_enter(void)
> -{
> - rcu_nmi_enter_common(false);
> -}
> -
> -/**
> * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
> *
> * Enter an interrupt handler, which might possibly result in exiting
> @@ -909,7 +892,7 @@ noinstr void rcu_nmi_enter(void)
> noinstr void rcu_irq_enter(void)
> {
> lockdep_assert_irqs_disabled();
> - rcu_nmi_enter_common(true);
> + rcu_nmi_enter();
> }
>
> /*
>

2020-05-06 17:13:32

by Alexandre Chartre

Subject: Re: [patch V4 part 1 36/36] rcu: Make RCU IRQ enter/exit functions rely on in_nmi()


On 5/5/20 3:16 PM, Thomas Gleixner wrote:
> From: Paul E. McKenney <[email protected]>
>
> The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
> "irq" parameter that indicates whether these functions are invoked from
> an irq handler (irq==true) or an NMI handler (irq==false). However,
> recent changes have applied notrace to a few critical functions such
> that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely
> on in_nmi(). Note that in_nmi() works no differently than before,
> but rather that tracing is now prohibited in code regions where in_nmi()
> would incorrectly report NMI state.
>
> Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
> rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
> respectively.
>
> Signed-off-by: Paul E. McKenney <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> Signed-off-by: Thomas Gleixner <[email protected]>
> ---
> kernel/rcu/tree.c | 47 +++++++++++++++--------------------------------
> 1 file changed, 15 insertions(+), 32 deletions(-)

I already sent a Reviewed-by for the first patches in this series, and went
through the remaining ones (20-36). I am not very familiar with some of these
areas, so for what it's worth:

Reviewed-by: Alexandre Chartre <[email protected]>

alex.


> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -627,16 +627,18 @@ noinstr void rcu_user_enter(void)
> }
> #endif /* CONFIG_NO_HZ_FULL */
>
> -/*
> +/**
> + * rcu_nmi_exit - inform RCU of exit from NMI context
> + *
> * If we are returning from the outermost NMI handler that interrupted an
> * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
> * to let the RCU grace-period handling know that the CPU is back to
> * being RCU-idle.
> *
> - * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_exit(), be sure to test
> * with CONFIG_RCU_EQS_DEBUG=y.
> */
> -static __always_inline void rcu_nmi_exit_common(bool irq)
> +noinstr void rcu_nmi_exit(void)
> {
> struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
>
> @@ -667,7 +669,7 @@ static __always_inline void rcu_nmi_exit
> trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
> WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
>
> - if (irq)
> + if (!in_nmi())
> rcu_prepare_for_idle();
> instr_end();
>
> @@ -675,22 +677,11 @@ static __always_inline void rcu_nmi_exit
> rcu_dynticks_eqs_enter();
> // ... but is no longer watching here.
>
> - if (irq)
> + if (!in_nmi())
> rcu_dynticks_task_enter();
> }
>
> /**
> - * rcu_nmi_exit - inform RCU of exit from NMI context
> - *
> - * If you add or remove a call to rcu_nmi_exit(), be sure to test
> - * with CONFIG_RCU_EQS_DEBUG=y.
> - */
> -void noinstr rcu_nmi_exit(void)
> -{
> - rcu_nmi_exit_common(false);
> -}
> -
> -/**
> * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
> *
> * Exit from an interrupt handler, which might possibly result in entering
> @@ -712,7 +703,7 @@ void noinstr rcu_nmi_exit(void)
> void noinstr rcu_irq_exit(void)
> {
> lockdep_assert_irqs_disabled();
> - rcu_nmi_exit_common(true);
> + rcu_nmi_exit();
> }
>
> /*
> @@ -801,7 +792,7 @@ void noinstr rcu_user_exit(void)
> #endif /* CONFIG_NO_HZ_FULL */
>
> /**
> - * rcu_nmi_enter_common - inform RCU of entry to NMI context
> + * rcu_nmi_enter - inform RCU of entry to NMI context
> * @irq: Is this call from rcu_irq_enter?
> *
> * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
> @@ -810,10 +801,10 @@ void noinstr rcu_user_exit(void)
> * long as the nesting level does not overflow an int. (You will probably
> * run out of stack space first.)
> *
> - * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
> + * If you add or remove a call to rcu_nmi_enter(), be sure to test
> * with CONFIG_RCU_EQS_DEBUG=y.
> */
> -static __always_inline void rcu_nmi_enter_common(bool irq)
> +noinstr void rcu_nmi_enter(void)
> {
> long incby = 2;
> struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
> @@ -831,18 +822,18 @@ static __always_inline void rcu_nmi_ente
> */
> if (rcu_dynticks_curr_cpu_in_eqs()) {
>
> - if (irq)
> + if (!in_nmi())
> rcu_dynticks_task_exit();
>
> // RCU is not watching here ...
> rcu_dynticks_eqs_exit();
> // ... but is watching here.
>
> - if (irq)
> + if (!in_nmi())
> rcu_cleanup_after_idle();
>
> incby = 1;
> - } else if (irq) {
> + } else if (!in_nmi()) {
> instr_begin();
> if (tick_nohz_full_cpu(rdp->cpu) &&
> rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
> @@ -877,14 +868,6 @@ static __always_inline void rcu_nmi_ente
> }
>
> /**
> - * rcu_nmi_enter - inform RCU of entry to NMI context
> - */
> -noinstr void rcu_nmi_enter(void)
> -{
> - rcu_nmi_enter_common(false);
> -}
> -
> -/**
> * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
> *
> * Enter an interrupt handler, which might possibly result in exiting
> @@ -909,7 +892,7 @@ noinstr void rcu_nmi_enter(void)
> noinstr void rcu_irq_enter(void)
> {
> lockdep_assert_irqs_disabled();
> - rcu_nmi_enter_common(true);
> + rcu_nmi_enter();
> }
>
> /*
>

Subject: [tip: core/rcu] rcu: Make RCU IRQ enter/exit functions rely on in_nmi()

The following commit has been merged into the core/rcu branch of tip:

Commit-ID: 9ea366f669ded353ae49754216c042e7d2f72ba6
Gitweb: https://git.kernel.org/tip/9ea366f669ded353ae49754216c042e7d2f72ba6
Author: Paul E. McKenney <[email protected]>
AuthorDate: Thu, 13 Feb 2020 12:31:16 -08:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Tue, 19 May 2020 15:51:21 +02:00

rcu: Make RCU IRQ enter/exit functions rely on in_nmi()

The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
"irq" parameter that indicates whether these functions have been invoked from
an irq handler (irq==true) or an NMI handler (irq==false).

However, recent changes have applied notrace to a few critical functions
such that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely on
in_nmi(). Note that in_nmi() works no differently than before, but rather
that tracing is now prohibited in code regions where in_nmi() would
incorrectly report NMI state.

Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
respectively.

Signed-off-by: Paul E. McKenney <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]


---
kernel/rcu/tree.c | 47 ++++++++++++++--------------------------------
1 file changed, 15 insertions(+), 32 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0713ef3..9454016 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -664,16 +664,18 @@ noinstr void rcu_user_enter(void)
}
#endif /* CONFIG_NO_HZ_FULL */

-/*
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
* If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
- * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-static __always_inline void rcu_nmi_exit_common(bool irq)
+noinstr void rcu_nmi_exit(void)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

@@ -704,7 +706,7 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

- if (irq)
+ if (!in_nmi())
rcu_prepare_for_idle();
instrumentation_end();

@@ -712,22 +714,11 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
rcu_dynticks_eqs_enter();
// ... but is no longer watching here.

- if (irq)
+ if (!in_nmi())
rcu_dynticks_task_enter();
}

/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
- *
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void noinstr rcu_nmi_exit(void)
-{
- rcu_nmi_exit_common(false);
-}
-
-/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
@@ -749,7 +740,7 @@ void noinstr rcu_nmi_exit(void)
void noinstr rcu_irq_exit(void)
{
lockdep_assert_irqs_disabled();
- rcu_nmi_exit_common(true);
+ rcu_nmi_exit();
}

/*
@@ -838,7 +829,7 @@ void noinstr rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */

/**
- * rcu_nmi_enter_common - inform RCU of entry to NMI context
+ * rcu_nmi_enter - inform RCU of entry to NMI context
* @irq: Is this call from rcu_irq_enter?
*
* If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
@@ -847,10 +838,10 @@ void noinstr rcu_user_exit(void)
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
*
- * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-static __always_inline void rcu_nmi_enter_common(bool irq)
+noinstr void rcu_nmi_enter(void)
{
long incby = 2;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -868,18 +859,18 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
*/
if (rcu_dynticks_curr_cpu_in_eqs()) {

- if (irq)
+ if (!in_nmi())
rcu_dynticks_task_exit();

// RCU is not watching here ...
rcu_dynticks_eqs_exit();
// ... but is watching here.

- if (irq)
+ if (!in_nmi())
rcu_cleanup_after_idle();

incby = 1;
- } else if (irq) {
+ } else if (!in_nmi()) {
instrumentation_begin();
if (tick_nohz_full_cpu(rdp->cpu) &&
rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
@@ -914,14 +905,6 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
}

/**
- * rcu_nmi_enter - inform RCU of entry to NMI context
- */
-noinstr void rcu_nmi_enter(void)
-{
- rcu_nmi_enter_common(false);
-}
-
-/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
* Enter an interrupt handler, which might possibly result in exiting
@@ -946,7 +929,7 @@ noinstr void rcu_nmi_enter(void)
noinstr void rcu_irq_enter(void)
{
lockdep_assert_irqs_disabled();
- rcu_nmi_enter_common(true);
+ rcu_nmi_enter();
}

/*