2020-05-05 14:24:36

by Thomas Gleixner

[permalink] [raw]
Subject: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
invoke rcu_irq_exit() before they either return to the interrupted code or
invoke the scheduler due to preemption.

The general assumption is that RCU idle code has to have preemption
disabled so that a return from interrupt cannot schedule. So the return
from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().

If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
had preemption enabled then this goes unnoticed until the CPU goes idle or
some other RCU check is executed.

Provide rcu_irq_exit_preempt() which can be invoked from the
interrupt/exception return code in case that preemption is enabled. It
invokes rcu_irq_exit() and contains a few sanity checks in case that
CONFIG_PROVE_RCU is enabled to catch such issues directly.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Joel Fernandes <[email protected]>
---
include/linux/rcutiny.h | 1 +
include/linux/rcutree.h | 1 +
kernel/rcu/tree.c | 21 +++++++++++++++++++++
3 files changed, 23 insertions(+)

--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -71,6 +71,7 @@ static inline void rcu_irq_enter(void) {
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
+static inline void rcu_irq_exit_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -46,6 +46,7 @@ void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
+void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -706,6 +706,27 @@ void noinstr rcu_irq_exit(void)
rcu_nmi_exit();
}

+/**
+ * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
+ * towards in kernel preemption
+ *
+ * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
+ * from RCU point of view. Invoked from return from interrupt before kernel
+ * preemption.
+ */
+void rcu_irq_exit_preempt(void)
+{
+ lockdep_assert_irqs_disabled();
+ rcu_nmi_exit();
+
+ RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+ "RCU dynticks_nesting counter underflow/zero!");
+ RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
+ "RCU dynticks_nmi_nesting counter underflow/zero!");
+ RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+ "RCU in extended quiescent state!");
+}
+
/*
* Wrapper for rcu_irq_exit() where interrupts are enabled.
*


2020-05-05 22:04:52

by Paul E. McKenney

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:
> Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
> invoke rcu_irq_exit() before they either return to the interrupted code or
> invoke the scheduler due to preemption.
>
> The general assumption is that RCU idle code has to have preemption
> disabled so that a return from interrupt cannot schedule. So the return
> from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().
>
> If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
> had preemption enabled then this goes unnoticed until the CPU goes idle or
> some other RCU check is executed.
>
> Provide rcu_irq_exit_preempt() which can be invoked from the
> interrupt/exception return code in case that preemption is enabled. It
> invokes rcu_irq_exit() and contains a few sanity checks in case that
> CONFIG_PROVE_RCU is enabled to catch such issues directly.
>
> Signed-off-by: Thomas Gleixner <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Joel Fernandes <[email protected]>

The ->dynticks_nmi_nesting field is going away at some point, but
there is always "git merge". ;-)

Reviewed-by: Paul E. McKenney <[email protected]>

> ---
> include/linux/rcutiny.h | 1 +
> include/linux/rcutree.h | 1 +
> kernel/rcu/tree.c | 21 +++++++++++++++++++++
> 3 files changed, 23 insertions(+)
>
> --- a/include/linux/rcutiny.h
> +++ b/include/linux/rcutiny.h
> @@ -71,6 +71,7 @@ static inline void rcu_irq_enter(void) {
> static inline void rcu_irq_exit_irqson(void) { }
> static inline void rcu_irq_enter_irqson(void) { }
> static inline void rcu_irq_exit(void) { }
> +static inline void rcu_irq_exit_preempt(void) { }
> static inline void exit_rcu(void) { }
> static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
> {
> --- a/include/linux/rcutree.h
> +++ b/include/linux/rcutree.h
> @@ -46,6 +46,7 @@ void rcu_idle_enter(void);
> void rcu_idle_exit(void);
> void rcu_irq_enter(void);
> void rcu_irq_exit(void);
> +void rcu_irq_exit_preempt(void);
> void rcu_irq_enter_irqson(void);
> void rcu_irq_exit_irqson(void);
>
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -706,6 +706,27 @@ void noinstr rcu_irq_exit(void)
> rcu_nmi_exit();
> }
>
> +/**
> + * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
> + * towards in kernel preemption
> + *
> + * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
> + * from RCU point of view. Invoked from return from interrupt before kernel
> + * preemption.
> + */
> +void rcu_irq_exit_preempt(void)
> +{
> + lockdep_assert_irqs_disabled();
> + rcu_nmi_exit();
> +
> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
> + "RCU dynticks_nesting counter underflow/zero!");
> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
> + "RCU dynticks_nmi_nesting counter underflow/zero!");
> + RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
> + "RCU in extended quiescent state!");
> +}
> +
> /*
> * Wrapper for rcu_irq_exit() where interrupts are enabled.
> *
>

2020-05-05 22:07:45

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

"Paul E. McKenney" <[email protected]> writes:

> On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:
>> Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
>> invoke rcu_irq_exit() before they either return to the interrupted code or
>> invoke the scheduler due to preemption.
>>
>> The general assumption is that RCU idle code has to have preemption
>> disabled so that a return from interrupt cannot schedule. So the return
>> from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().
>>
>> If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
>> had preemption enabled then this goes unnoticed until the CPU goes idle or
>> some other RCU check is executed.
>>
>> Provide rcu_irq_exit_preempt() which can be invoked from the
>> interrupt/exception return code in case that preemption is enabled. It
>> invokes rcu_irq_exit() and contains a few sanity checks in case that
>> CONFIG_PROVE_RCU is enabled to catch such issues directly.
>>
>> Signed-off-by: Thomas Gleixner <[email protected]>
>> Cc: "Paul E. McKenney" <[email protected]>
>> Cc: Joel Fernandes <[email protected]>
>
> The ->dynticks_nmi_nesting field is going away at some point, but
> there is always "git merge". ;-)

Yes. The logistics for merging all of this is going to be interesting :)

2020-05-05 22:26:47

by Paul E. McKenney

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

On Wed, May 06, 2020 at 12:05:04AM +0200, Thomas Gleixner wrote:
> "Paul E. McKenney" <[email protected]> writes:
>
> > On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:
> >> Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
> >> invoke rcu_irq_exit() before they either return to the interrupted code or
> >> invoke the scheduler due to preemption.
> >>
> >> The general assumption is that RCU idle code has to have preemption
> >> disabled so that a return from interrupt cannot schedule. So the return
> >> from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().
> >>
> >> If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
> >> had preemption enabled then this goes unnoticed until the CPU goes idle or
> >> some other RCU check is executed.
> >>
> >> Provide rcu_irq_exit_preempt() which can be invoked from the
> >> interrupt/exception return code in case that preemption is enabled. It
> >> invokes rcu_irq_exit() and contains a few sanity checks in case that
> >> CONFIG_PROVE_RCU is enabled to catch such issues directly.
> >>
> >> Signed-off-by: Thomas Gleixner <[email protected]>
> >> Cc: "Paul E. McKenney" <[email protected]>
> >> Cc: Joel Fernandes <[email protected]>
> >
> > The ->dynticks_nmi_nesting field is going away at some point, but
> > there is always "git merge". ;-)
>
> Yes. The logistics for merging all of this is going to be interesting :)

;-) ;-) ;-)

Thanx, Paul

2020-05-14 01:06:08

by Mathieu Desnoyers

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

----- On May 5, 2020, at 9:44 AM, Thomas Gleixner [email protected] wrote:
[...]
>
> +/**
> + * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
> + * towards in kernel preemption

Not sure what "towards in" means.

Thanks,

Mathieu

--
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com

2020-05-14 02:43:05

by Joel Fernandes

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

Hi Thomas,

On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:

Thank you for CC'ing me.

> Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
> invoke rcu_irq_exit() before they either return to the interrupted code or
> invoke the scheduler due to preemption.
>
> The general assumption is that RCU idle code has to have preemption
> disabled so that a return from interrupt cannot schedule. So the return
> from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().
>
> If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
> had preemption enabled then this goes unnoticed until the CPU goes idle or
> some other RCU check is executed.
>
> Provide rcu_irq_exit_preempt() which can be invoked from the
> interrupt/exception return code in case that preemption is enabled. It
> invokes rcu_irq_exit() and contains a few sanity checks in case that
> CONFIG_PROVE_RCU is enabled to catch such issues directly.

Could you let me know which patch or part in the multi-part series is using it?

>
> Signed-off-by: Thomas Gleixner <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/rcutiny.h | 1 +
> include/linux/rcutree.h | 1 +
> kernel/rcu/tree.c | 21 +++++++++++++++++++++
> 3 files changed, 23 insertions(+)
>
> --- a/include/linux/rcutiny.h
> +++ b/include/linux/rcutiny.h
> @@ -71,6 +71,7 @@ static inline void rcu_irq_enter(void) {
> static inline void rcu_irq_exit_irqson(void) { }
> static inline void rcu_irq_enter_irqson(void) { }
> static inline void rcu_irq_exit(void) { }
> +static inline void rcu_irq_exit_preempt(void) { }
> static inline void exit_rcu(void) { }
> static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
> {
> --- a/include/linux/rcutree.h
> +++ b/include/linux/rcutree.h
> @@ -46,6 +46,7 @@ void rcu_idle_enter(void);
> void rcu_idle_exit(void);
> void rcu_irq_enter(void);
> void rcu_irq_exit(void);
> +void rcu_irq_exit_preempt(void);
> void rcu_irq_enter_irqson(void);
> void rcu_irq_exit_irqson(void);
>
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -706,6 +706,27 @@ void noinstr rcu_irq_exit(void)
> rcu_nmi_exit();
> }
>
> +/**
> + * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
> + * towards in kernel preemption
> + *
> + * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
> + * from RCU point of view. Invoked from return from interrupt before kernel
> + * preemption.
> + */
> +void rcu_irq_exit_preempt(void)
> +{
> + lockdep_assert_irqs_disabled();
> + rcu_nmi_exit();
> +
> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
> + "RCU dynticks_nesting counter underflow/zero!");

Makes sense.

> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
> + "RCU dynticks_nmi_nesting counter underflow/zero!");

This new function will be called only from the outer-most IRQ that
interrupted kernel mode (process context). Right? If so, a better (more
specific) check for the second RCU_LOCKDEP_WARN above is:

RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != DYNTICK_IRQ_NONIDLE,
"Bad RCU dynticks_nmi_nesting counter\n");

That will make sure, it is only called from outer-most rcu_irq_exit() and
interrupting kernel mode.

Or, if [1] is merged, then we could just combine the checks into one check.
RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) != 1,
"Bad RCU dynticks_nmi_nesting counter\n");

> + RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
> + "RCU in extended quiescent state!");

Makes sense.

BTW, I wonder if a better place to do this "don't enter scheduler while RCU
is not watching" is rcu_note_context_switch()...

thanks,

- Joel
[1] https://git.kernel.org/pub/scm/linux/kernel/git/jfern/linux.git/commit/?h=rcu-dynticks-may4-rebased&id=b48863c234295d8ec956b50f6cf5ae0a0269f48d

> +}
> +
> /*
> * Wrapper for rcu_irq_exit() where interrupts are enabled.
> *
>

2020-05-14 02:51:03

by Joel Fernandes

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

On Wed, May 13, 2020 at 10:41:16PM -0400, Joel Fernandes wrote:
> Hi Thomas,
>
> On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:
>
> Thank you for CC'ing me.
>
> > Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
> > invoke rcu_irq_exit() before they either return to the interrupted code or
> > invoke the scheduler due to preemption.
> >
> > The general assumption is that RCU idle code has to have preemption
> > disabled so that a return from interrupt cannot schedule. So the return
> > from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().
> >
> > If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
> > had preemption enabled then this goes unnoticed until the CPU goes idle or
> > some other RCU check is executed.
> >
> > Provide rcu_irq_exit_preempt() which can be invoked from the
> > interrupt/exception return code in case that preemption is enabled. It
> > invokes rcu_irq_exit() and contains a few sanity checks in case that
> > CONFIG_PROVE_RCU is enabled to catch such issues directly.
>
> Could you let me know which patch or part in the multi-part series is using it?

Ah I see its "x86/entry/common: Provide idtentry_enter/exit()" patch. I'll go
read that tomorrow. Thanks!

- Joel

2020-05-14 14:46:19

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

Joel,

Joel Fernandes <[email protected]> writes:
> On Tue, May 05, 2020 at 03:44:05PM +0200, Thomas Gleixner wrote:
> Could you let me know which patch or part in the multi-part series is
> using it?

You found it :)
>> +void rcu_irq_exit_preempt(void)
>> +{
>> + lockdep_assert_irqs_disabled();
>> + rcu_nmi_exit();
>> +
>> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
>> + "RCU dynticks_nesting counter underflow/zero!");
>
> Makes sense.
>
>> + RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
>> + "RCU dynticks_nmi_nesting counter underflow/zero!");
>
> This new function will be called only from the outer-most IRQ that
> interrupted kernel mode (process context). Right? If so, a better (more
> specific) check for the second RCU_LOCKDEP_WARN above is:
>
> RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != DYNTICK_IRQ_NONIDLE,
> "Bad RCU dynticks_nmi_nesting counter\n");
>
> That will make sure, it is only called from outer-most rcu_irq_exit() and
> interrupting kernel mode.

Makes sense.

> Or, if [1] is merged, then we could just combine the checks into one check.
> RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) != 1,
> "Bad RCU dynticks_nmi_nesting counter\n");
>
>> + RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
>> + "RCU in extended quiescent state!");
>
> Makes sense.
>
> BTW, I wonder if a better place to do this "don't enter scheduler while RCU
> is not watching" is rcu_note_context_switch()...

I actually want to catch even the case where we don't schedule, i.e.

if (ret_to_kernel) {
if (interrupts_on_after_return((regs)) {
if (IS_ENABLED(CONFIG_PREEMPTION)) {
if (!preempt_count()) {
/* Preemption is possible ... */
rcu_irq_exit_preempt();
if (need_resched())
preempt_schedule_irq();

that catches any exit where preemption is possible and RCU is not
watching after rcu_irq_exit().

It does not matter whether need-resched is set here or not. Any
interrupt/exception could set it.

Yes, I'm paranoid :)

Thanks,

tglx

2020-05-15 19:03:04

by Joel Fernandes

[permalink] [raw]
Subject: Re: [patch V4 part 3 11/29] rcu: Provide rcu_irq_exit_preempt()

On Thu, May 14, 2020 at 04:43:31PM +0200, Thomas Gleixner wrote:
[...]
> > Or, if [1] is merged, then we could just combine the checks into one check.
> > RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) != 1,
> > "Bad RCU dynticks_nmi_nesting counter\n");
> >
> >> + RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
> >> + "RCU in extended quiescent state!");
> >
> > Makes sense.
> >
> > BTW, I wonder if a better place to do this "don't enter scheduler while RCU
> > is not watching" is rcu_note_context_switch()...
>
> I actually want to catch even the case where we don't schedule, i.e.
>
> if (ret_to_kernel) {
> if (interrupts_on_after_return((regs)) {
> if (IS_ENABLED(CONFIG_PREEMPTION)) {
> if (!preempt_count()) {
> /* Preemption is possible ... */
> rcu_irq_exit_preempt();
> if (need_resched())
> preempt_schedule_irq();
>
> that catches any exit where preemption is possible and RCU is not
> watching after rcu_irq_exit().
>
> It does not matter whether need-resched is set here or not. Any
> interrupt/exception could set it.

Yes, your way of doing it is better. Let us do it this way then.

Thanks!

- Joel

Subject: [tip: core/rcu] rcu: Provide rcu_irq_exit_preempt()

The following commit has been merged into the core/rcu branch of tip:

Commit-ID: 8ae0ae6737ad449c8ae21e2bb01d9736f360a933
Gitweb: https://git.kernel.org/tip/8ae0ae6737ad449c8ae21e2bb01d9736f360a933
Author: Thomas Gleixner <[email protected]>
AuthorDate: Sun, 03 May 2020 15:08:52 +02:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Tue, 19 May 2020 15:51:21 +02:00

rcu: Provide rcu_irq_exit_preempt()

Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
invoke rcu_irq_exit() before they either return to the interrupted code or
invoke the scheduler due to preemption.

The general assumption is that RCU idle code has to have preemption
disabled so that a return from interrupt cannot schedule. So the return
from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().

If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle code
had preemption enabled then this goes unnoticed until the CPU goes idle or
some other RCU check is executed.

Provide rcu_irq_exit_preempt() which can be invoked from the
interrupt/exception return code in case that preemption is enabled. It
invokes rcu_irq_exit() and contains a few sanity checks in case that
CONFIG_PROVE_RCU is enabled to catch such issues directly.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Paul E. McKenney <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]


---
include/linux/rcutiny.h | 1 +
include/linux/rcutree.h | 1 +
kernel/rcu/tree.c | 22 ++++++++++++++++++++++
3 files changed, 24 insertions(+)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3465ba7..980eb78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -71,6 +71,7 @@ static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
+static inline void rcu_irq_exit_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index fbc2627..02016e0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -47,6 +47,7 @@ void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
+void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9454016..62ee012 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -743,6 +743,28 @@ void noinstr rcu_irq_exit(void)
rcu_nmi_exit();
}

+/**
+ * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
+ * towards in kernel preemption
+ *
+ * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
+ * from RCU point of view. Invoked from return from interrupt before kernel
+ * preemption.
+ */
+void rcu_irq_exit_preempt(void)
+{
+ lockdep_assert_irqs_disabled();
+ rcu_nmi_exit();
+
+ RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+ "RCU dynticks_nesting counter underflow/zero!");
+ RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
+ DYNTICK_IRQ_NONIDLE,
+ "Bad RCU dynticks_nmi_nesting counter\n");
+ RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+ "RCU in extended quiescent state!");
+}
+
/*
* Wrapper for rcu_irq_exit() where interrupts are enabled.
*