When an IRQ is taken, some accounting needs to be performed to enter and
exit IRQ context around the IRQ handler. Historically arch code would
leave this to the irqchip or core IRQ code, but these days we want this
to happen in exception entry code, and architectures such as arm64 do
this.
Currently handle_domain_irq() performs this entry/exit accounting, and
if used on an architecture where the entry code also does this, the
entry/exit accounting will be performed twice per IRQ. This is
problematic as core RCU code such as rcu_is_cpu_rrupt_from_idle()
depends on this happening once per IRQ, and will not detect quiescent
periods correctly, leading to stall warnings.
As irqchip drivers which use handle_domain_irq() need to work on
architectures with or without their own entry/exit accounting, this
patch makes handle_domain_irq() conditionally perform the entry
accounting depending on a new HAVE_ARCH_IRQENTRY Kconfig symbol that
architectures can select if they perform this entry accounting
themselves.
V2 -> V3:
Drop other patches and concentrate on the purpose of [3-4/5] of V2.
And lift the level, where to add {irq_enter,exit}_rcu(), from the
interrupt controller to exception entry
History:
V1: https://lore.kernel.org/linux-arm-kernel/[email protected]
V2: https://lore.kernel.org/linux-arm-kernel/[email protected]
Cc: "Paul E. McKenney" <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Joey Gouly <[email protected]>
Cc: Sami Tolvanen <[email protected]>
Cc: Julien Thierry <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Yuichi Ito <[email protected]>
Cc: [email protected]
To: [email protected]
Mark Rutland (1):
arm64: entry: refactor EL1 interrupt entry logic
Pingfan Liu (2):
kernel/irq: make irq_{enter,exit}() in handle_domain_irq() arch
optional
arm64/entry-common: supplement irq accounting
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/entry-common.c | 48 +++++++++++++++++---------------
kernel/irq/Kconfig | 3 ++
kernel/irq/irqdesc.c | 4 +++
4 files changed, 34 insertions(+), 22 deletions(-)
--
2.31.1
From: Mark Rutland <[email protected]>
Currently we distinguish IRQ and definitely-PNMI at entry/exit time
via the enter_el1_irq_or_nmi() and exit_el1_irq_or_nmi() helpers. In
subsequent patches we'll need to handle the two cases more distinctly
in the body of the exception handler.
To make this possible, this patch refactors el1_interrupt to be a
top-level dispatcher to separate handlers for the IRQ and PNMI cases,
removing the need for the enter_el1_irq_or_nmi() and
exit_el1_irq_or_nmi() helpers.
Note that since arm64_enter_nmi() calls __nmi_enter(), which
increments the preempt_count, we could never preempt when handling a
PNMI. We now only check for preemption in the IRQ case, which makes
this clearer.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Joey Gouly <[email protected]>
Cc: Sami Tolvanen <[email protected]>
Cc: Julien Thierry <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Yuichi Ito <[email protected]>
Cc: Pingfan Liu <[email protected]>
Cc: [email protected]
To: [email protected]
---
arch/arm64/kernel/entry-common.c | 44 ++++++++++++++++----------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 32f9796c4ffe..5f1473319fb0 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -219,22 +219,6 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
lockdep_hardirqs_on(CALLER_ADDR0);
}
-static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- arm64_enter_nmi(regs);
- else
- enter_from_kernel_mode(regs);
-}
-
-static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- arm64_exit_nmi(regs);
- else
- exit_to_kernel_mode(regs);
-}
-
static void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
@@ -432,14 +416,19 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
}
}
-static void noinstr el1_interrupt(struct pt_regs *regs,
- void (*handler)(struct pt_regs *))
+static __always_inline void
+__el1_pnmi(struct pt_regs *regs, void (*handler)(struct pt_regs *))
{
- write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
-
- enter_el1_irq_or_nmi(regs);
+ arm64_enter_nmi(regs);
do_interrupt_handler(regs, handler);
+ arm64_exit_nmi(regs);
+}
+static __always_inline void
+__el1_interrupt(struct pt_regs *regs, void (*handler)(struct pt_regs *))
+{
+ enter_from_kernel_mode(regs);
+ do_interrupt_handler(regs, handler);
/*
* Note: thread_info::preempt_count includes both thread_info::count
* and thread_info::need_resched, and is not equivalent to
@@ -448,8 +437,19 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
if (IS_ENABLED(CONFIG_PREEMPTION) &&
READ_ONCE(current_thread_info()->preempt_count) == 0)
arm64_preempt_schedule_irq();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ __el1_pnmi(regs, handler);
+ else
+ __el1_interrupt(regs, handler);
- exit_el1_irq_or_nmi(regs);
}
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
--
2.31.1
On Thu, 30 Sep 2021 14:17:07 +0100,
Pingfan Liu <[email protected]> wrote:
>
> From: Mark Rutland <[email protected]>
>
> Currently we distinguish IRQ and definitely-PNMI at entry/exit time
> via the enter_el1_irq_or_nmi() and enter_el1_irq_or_nmi() helpers. In
> subsequent patches we'll need to handle the two cases more distinctly
> in the body of the exception handler.
>
> To make this possible, this patch refactors el1_interrupt to be a
> top-level dispatcher to separate handlers for the IRQ and PNMI cases,
> removing the need for the enter_el1_irq_or_nmi() and
> exit_el1_irq_or_nmi() helpers.
>
> Note that since arm64_enter_nmi() calls __nmi_enter(), which
> increments the preemt_count, we could never preempt when handling a
> PNMI. We now only check for preemption in the IRQ case, which makes
> this clearer.
>
> There should be no functional change as a result of this patch.
>
> Signed-off-by: Mark Rutland <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Catalin Marinas <[email protected]>
> Cc: Will Deacon <[email protected]>
> Cc: Marc Zyngier <[email protected]>
> Cc: Joey Gouly <[email protected]>
> Cc: Sami Tolvanen <[email protected]>
> Cc: Julien Thierry <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Yuichi Ito <[email protected]>
> Cc: Pingfan Liu <[email protected]>
> Cc: [email protected]
> To: [email protected]
> ---
> arch/arm64/kernel/entry-common.c | 44 ++++++++++++++++----------------
> 1 file changed, 22 insertions(+), 22 deletions(-)
>
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 32f9796c4ffe..5f1473319fb0 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -219,22 +219,6 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
> lockdep_hardirqs_on(CALLER_ADDR0);
> }
>
> -static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
> -{
> - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> - arm64_enter_nmi(regs);
> - else
> - enter_from_kernel_mode(regs);
> -}
> -
> -static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
> -{
> - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> - arm64_exit_nmi(regs);
> - else
> - exit_to_kernel_mode(regs);
> -}
> -
> static void __sched arm64_preempt_schedule_irq(void)
> {
> lockdep_assert_irqs_disabled();
> @@ -432,14 +416,19 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
> }
> }
>
> -static void noinstr el1_interrupt(struct pt_regs *regs,
> - void (*handler)(struct pt_regs *))
> +static __always_inline void
> +__el1_pnmi(struct pt_regs *regs, void (*handler)(struct pt_regs *))
> {
> - write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
> -
> - enter_el1_irq_or_nmi(regs);
> + arm64_enter_nmi(regs);
> do_interrupt_handler(regs, handler);
> + arm64_exit_nmi(regs);
> +}
>
> +static __always_inline void
> +__el1_interrupt(struct pt_regs *regs, void (*handler)(struct pt_regs *))
> +{
> + enter_from_kernel_mode(regs);
> + do_interrupt_handler(regs, handler);
> /*
> * Note: thread_info::preempt_count includes both thread_info::count
> * and thread_info::need_resched, and is not equivalent to
> @@ -448,8 +437,19 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
> if (IS_ENABLED(CONFIG_PREEMPTION) &&
> READ_ONCE(current_thread_info()->preempt_count) == 0)
> arm64_preempt_schedule_irq();
> + exit_to_kernel_mode(regs);
> +}
> +
> +static void noinstr el1_interrupt(struct pt_regs *regs,
> + void (*handler)(struct pt_regs *))
> +{
> + write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
> +
> + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> + __el1_pnmi(regs, handler);
> + else
> + __el1_interrupt(regs, handler);
>
> - exit_el1_irq_or_nmi(regs);
nit: spurious blank line.
> }
>
> asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
With Mark's remark addressed,
Reviewed-by: Marc Zyngier <[email protected]>
M.
--
Without deviation from the norm, progress is not possible.
On Fri, Oct 01, 2021 at 10:21:06AM +0100, Marc Zyngier wrote:
> On Thu, 30 Sep 2021 14:17:07 +0100,
> Pingfan Liu <[email protected]> wrote:
> >
> > From: Mark Rutland <[email protected]>
> >
> > Currently we distinguish IRQ and definitely-PNMI at entry/exit time
> > via the enter_el1_irq_or_nmi() and enter_el1_irq_or_nmi() helpers. In
> > subsequent patches we'll need to handle the two cases more distinctly
> > in the body of the exception handler.
> >
> > To make this possible, this patch refactors el1_interrupt to be a
> > top-level dispatcher to separate handlers for the IRQ and PNMI cases,
> > removing the need for the enter_el1_irq_or_nmi() and
> > exit_el1_irq_or_nmi() helpers.
> >
> > Note that since arm64_enter_nmi() calls __nmi_enter(), which
> > increments the preemt_count, we could never preempt when handling a
> > PNMI. We now only check for preemption in the IRQ case, which makes
> > this clearer.
> >
> > There should be no functional change as a result of this patch.
> >
> > Signed-off-by: Mark Rutland <[email protected]>
> > Cc: "Paul E. McKenney" <[email protected]>
> > Cc: Catalin Marinas <[email protected]>
> > Cc: Will Deacon <[email protected]>
> > Cc: Marc Zyngier <[email protected]>
> > Cc: Joey Gouly <[email protected]>
> > Cc: Sami Tolvanen <[email protected]>
> > Cc: Julien Thierry <[email protected]>
> > Cc: Thomas Gleixner <[email protected]>
> > Cc: Yuichi Ito <[email protected]>
> > Cc: Pingfan Liu <[email protected]>
> > Cc: [email protected]
> > To: [email protected]
> > ---
> > arch/arm64/kernel/entry-common.c | 44 ++++++++++++++++----------------
> > 1 file changed, 22 insertions(+), 22 deletions(-)
> >
> > diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> > index 32f9796c4ffe..5f1473319fb0 100644
> > --- a/arch/arm64/kernel/entry-common.c
> > +++ b/arch/arm64/kernel/entry-common.c
> > @@ -219,22 +219,6 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
> > lockdep_hardirqs_on(CALLER_ADDR0);
> > }
> >
> > -static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
> > -{
> > - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> > - arm64_enter_nmi(regs);
> > - else
> > - enter_from_kernel_mode(regs);
> > -}
> > -
> > -static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
> > -{
> > - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> > - arm64_exit_nmi(regs);
> > - else
> > - exit_to_kernel_mode(regs);
> > -}
> > -
> > static void __sched arm64_preempt_schedule_irq(void)
> > {
> > lockdep_assert_irqs_disabled();
> > @@ -432,14 +416,19 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
> > }
> > }
> >
> > -static void noinstr el1_interrupt(struct pt_regs *regs,
> > - void (*handler)(struct pt_regs *))
> > +static __always_inline void
> > +__el1_pnmi(struct pt_regs *regs, void (*handler)(struct pt_regs *))
> > {
> > - write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
> > -
> > - enter_el1_irq_or_nmi(regs);
> > + arm64_enter_nmi(regs);
> > do_interrupt_handler(regs, handler);
> > + arm64_exit_nmi(regs);
> > +}
> >
> > +static __always_inline void
> > +__el1_interrupt(struct pt_regs *regs, void (*handler)(struct pt_regs *))
> > +{
> > + enter_from_kernel_mode(regs);
> > + do_interrupt_handler(regs, handler);
> > /*
> > * Note: thread_info::preempt_count includes both thread_info::count
> > * and thread_info::need_resched, and is not equivalent to
> > @@ -448,8 +437,19 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
> > if (IS_ENABLED(CONFIG_PREEMPTION) &&
> > READ_ONCE(current_thread_info()->preempt_count) == 0)
> > arm64_preempt_schedule_irq();
> > + exit_to_kernel_mode(regs);
> > +}
> > +
> > +static void noinstr el1_interrupt(struct pt_regs *regs,
> > + void (*handler)(struct pt_regs *))
> > +{
> > + write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
> > +
> > + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
> > + __el1_pnmi(regs, handler);
> > + else
> > + __el1_interrupt(regs, handler);
> >
> > - exit_el1_irq_or_nmi(regs);
>
> nit: spurious blank line.
>
OK, I will fix it.
> > }
> >
> > asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
>
> With Mark's remark addressed,
>
> Reviewed-by: Marc Zyngier <[email protected]>
>
Thanks,
Pingfan