From: afzal mohammed <[email protected]>
commit b75ca5217743e4d7076cf65e044e88389e44318d upstream.
request_irq() is preferred over setup_irq(). Invocations of setup_irq()
occur after memory allocators are ready.
Per tglx[1], setup_irq() existed in olden days when allocators were not
ready by the time early interrupts were initialized.
Hence replace setup_irq() by request_irq().
[1] https://lkml.kernel.org/r/alpine.DEB.2.20.1710191609480.1971@nanos
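For illustration, a minimal before/after sketch of the conversion pattern
(EXAMPLE_IRQ and example_interrupt are made-up names for this sketch, not
part of the patch):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical IRQ number and handler, for illustration only. */
#define EXAMPLE_IRQ	42

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Old style: a static struct irqaction registered with setup_irq(). */
static struct irqaction example_irqaction = {
	.name		= "example timer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= example_interrupt,
};

static void __init example_init_old(void)
{
	setup_irq(EXAMPLE_IRQ, &example_irqaction);
}

/* New style: request_irq() with the same handler, flags and name. */
static void __init example_init_new(void)
{
	if (request_irq(EXAMPLE_IRQ, example_interrupt,
			IRQF_TIMER | IRQF_IRQPOLL, "example timer", NULL))
		pr_err("Failed to request irq %d (example timer)\n",
		       EXAMPLE_IRQ);
}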
Cc: Daniel Lezcano <[email protected]>
Cc: Keerthy <[email protected]>
Cc: Tero Kristo <[email protected]>
Signed-off-by: afzal mohammed <[email protected]>
Signed-off-by: Tony Lindgren <[email protected]>
---
arch/arm/mach-omap1/pm.c | 13 ++++++-------
arch/arm/mach-omap1/time.c | 10 +++-------
arch/arm/mach-omap1/timer32k.c | 10 +++-------
arch/arm/mach-omap2/timer.c | 11 +++--------
4 files changed, 15 insertions(+), 29 deletions(-)
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -610,11 +610,6 @@ static irqreturn_t omap_wakeup_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction omap_wakeup_irq = {
- .name = "peripheral wakeup",
- .handler = omap_wakeup_interrupt
-};
-
static const struct platform_suspend_ops omap_pm_ops = {
@@ -627,6 +622,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
static int __init omap_pm_init(void)
{
int error = 0;
+ int irq;
if (!cpu_class_is_omap1())
return -ENODEV;
@@ -670,9 +666,12 @@ static int __init omap_pm_init(void)
arm_pm_idle = omap1_pm_idle;
if (cpu_is_omap7xx())
- setup_irq(INT_7XX_WAKE_UP_REQ, &omap_wakeup_irq);
+ irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
- setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+ irq = INT_1610_WAKE_UP_REQ;
+ if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
+ NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -155,15 +155,11 @@ static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap_mpu_timer1_irq = {
- .name = "mpu_timer1",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap_mpu_timer1_interrupt,
-};
-
static __init void omap_init_mpu_timer(unsigned long rate)
{
- setup_irq(INT_TIMER1, &omap_mpu_timer1_irq);
+ if (request_irq(INT_TIMER1, omap_mpu_timer1_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "mpu_timer1", NULL))
+ pr_err("Failed to request irq %d (mpu_timer1)\n", INT_TIMER1);
omap_mpu_timer_start(0, (rate / HZ) - 1, 1);
clockevent_mpu_timer1.cpumask = cpumask_of(0);
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -148,15 +148,11 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap_32k_timer_irq = {
- .name = "32KHz timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap_32k_timer_interrupt,
-};
-
static __init void omap_init_32k_timer(void)
{
- setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
+ if (request_irq(INT_OS_TIMER, omap_32k_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "32KHz timer", NULL))
+ pr_err("Failed to request irq %d(32KHz timer)\n", INT_OS_TIMER);
clockevent_32k_timer.cpumask = cpumask_of(0);
clockevents_config_and_register(&clockevent_32k_timer,
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -92,12 +92,6 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap2_gp_timer_irq = {
- .name = "gp_timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap2_gp_timer_interrupt,
-};
-
static int omap2_gp_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@@ -383,8 +377,9 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
&clockevent_gpt.name, OMAP_TIMER_POSTED);
BUG_ON(res);
- omap2_gp_timer_irq.dev_id = &clkev;
- setup_irq(clkev.irq, &omap2_gp_timer_irq);
+ if (request_irq(clkev.irq, omap2_gp_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "gp_timer", &clkev))
+ pr_err("Failed to request irq %d (gp_timer)\n", clkev.irq);
__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
--
2.32.0
commit 25de4ce5ed02994aea8bc111d133308f6fd62566 upstream.
There is a timer wrap issue on dra7 for the ARM architected timer.
In a typical clock configuration the timer fails to wrap after 388 days.
To work around the issue, we need to use timer-ti-dm percpu timers instead.
Let's configure dmtimer3 and 4 as percpu timers by default, and warn about
the issue if the dtb is not configured properly.
For more information, please see the errata for "AM572x Sitara Processors
Silicon Revisions 1.1, 2.0":
https://www.ti.com/lit/er/sprz429m/sprz429m.pdf
The concept is based on earlier reference patches done by Tero Kristo and
Keerthy.
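As a rough sketch of the mechanism (simplified from the timer.c hunk below;
TIMER_RATE_HZ is a placeholder, and the clock_event_device fields are assumed
to have been filled in earlier, as dmtimer_clkevt_init_common() does in this
series), the per-CPU timers are registered through a CPU hotplug "starting"
callback:

#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

#define TIMER_RATE_HZ	32768	/* placeholder rate for this sketch */

/* One clockevent per CPU; the real code pairs it with an omap_dm_timer. */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clkevt);

/* Runs on each CPU as it comes online. */
static int percpu_clkevt_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *dev = per_cpu_ptr(&percpu_clkevt, cpu);

	/* Register this CPU's timer and pin its interrupt to the CPU. */
	clockevents_config_and_register(dev, TIMER_RATE_HZ, 3, ULONG_MAX);
	irq_force_affinity(dev->irq, cpumask_of(cpu));

	return 0;
}

static int __init percpu_clkevt_init(void)
{
	/* CPUHP_AP_OMAP_DM_TIMER_STARTING is the hotplug state this patch adds. */
	return cpuhp_setup_state(CPUHP_AP_OMAP_DM_TIMER_STARTING,
				 "clockevents/omap/gptimer:starting",
				 percpu_clkevt_starting_cpu, NULL);
}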
Cc: Daniel Lezcano <[email protected]>
Cc: Keerthy <[email protected]>
Cc: Tero Kristo <[email protected]>
[[email protected]: backported to 4.19.y]
Signed-off-by: Tony Lindgren <[email protected]>
---
arch/arm/boot/dts/dra7.dtsi | 11 ++++++
arch/arm/mach-omap2/board-generic.c | 4 +--
arch/arm/mach-omap2/timer.c | 53 ++++++++++++++++++++++++++++-
drivers/clk/ti/clk-7xx.c | 1 +
include/linux/cpuhotplug.h | 1 +
5 files changed, 67 insertions(+), 3 deletions(-)
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -48,6 +48,7 @@
timer {
compatible = "arm,armv7-timer";
+ status = "disabled"; /* See ARM architected timer wrap erratum i940 */
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
@@ -910,6 +911,8 @@
reg = <0x48032000 0x80>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer2";
+ clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_TIMER2_CLKCTRL 24>;
};
timer3: timer@48034000 {
@@ -917,6 +920,10 @@
reg = <0x48034000 0x80>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer3";
+ clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_TIMER3_CLKCTRL 24>;
+ assigned-clocks = <&l4per_clkctrl DRA7_TIMER3_CLKCTRL 24>;
+ assigned-clock-parents = <&timer_sys_clk_div>;
};
timer4: timer@48036000 {
@@ -924,6 +931,10 @@
reg = <0x48036000 0x80>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer4";
+ clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_TIMER4_CLKCTRL 24>;
+ assigned-clocks = <&l4per_clkctrl DRA7_TIMER4_CLKCTRL 24>;
+ assigned-clock-parents = <&timer_sys_clk_div>;
};
timer5: timer@48820000 {
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -330,7 +330,7 @@ DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
.init_late = dra7xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
- .init_time = omap5_realtime_timer_init,
+ .init_time = omap3_gptimer_timer_init,
.dt_compat = dra74x_boards_compat,
.restart = omap44xx_restart,
MACHINE_END
@@ -353,7 +353,7 @@ DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
.init_late = dra7xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
- .init_time = omap5_realtime_timer_init,
+ .init_time = omap3_gptimer_timer_init,
.dt_compat = dra72x_boards_compat,
.restart = omap44xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -42,6 +42,7 @@
#include <linux/platform_device.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <linux/sched_clock.h>
+#include <linux/cpu.h>
#include <asm/mach/time.h>
#include <asm/smp_twd.h>
@@ -421,6 +422,53 @@ static void __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
timer->rate);
}
+static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
+
+static int omap_gptimer_starting_cpu(unsigned int cpu)
+{
+ struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+ struct clock_event_device *dev = &clkevt->dev;
+ struct omap_dm_timer *timer = &clkevt->timer;
+
+ clockevents_config_and_register(dev, timer->rate, 3, ULONG_MAX);
+ irq_force_affinity(dev->irq, cpumask_of(cpu));
+
+ return 0;
+}
+
+static int __init dmtimer_percpu_quirk_init(void)
+{
+ struct dmtimer_clockevent *clkevt;
+ struct clock_event_device *dev;
+ struct device_node *arm_timer;
+ struct omap_dm_timer *timer;
+ int cpu = 0;
+
+ arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ if (of_device_is_available(arm_timer)) {
+ pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+ return 0;
+ }
+
+ for_each_possible_cpu(cpu) {
+ clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+ dev = &clkevt->dev;
+ timer = &clkevt->timer;
+
+ dmtimer_clkevt_init_common(clkevt, 0, "timer_sys_ck",
+ CLOCK_EVT_FEAT_ONESHOT,
+ cpumask_of(cpu),
+ "assigned-clock-parents",
+ 500, "percpu timer");
+ }
+
+ cpuhp_setup_state(CPUHP_AP_OMAP_DM_TIMER_STARTING,
+ "clockevents/omap/gptimer:starting",
+ omap_gptimer_starting_cpu, NULL);
+
+ return 0;
+}
+
/* Clocksource code */
static struct omap_dm_timer clksrc;
static bool use_gptimer_clksrc __initdata;
@@ -565,6 +613,9 @@ static void __init __omap_sync32k_timer_init(int clkev_nr, const char *clkev_src
3, /* Timer internal resynch latency */
0xffffffff);
+ if (soc_is_dra7xx())
+ dmtimer_percpu_quirk_init();
+
/* Enable the use of clocksource="gp_timer" kernel parameter */
if (use_gptimer_clksrc || gptimer)
omap2_gptimer_clocksource_init(clksrc_nr, clksrc_src,
@@ -592,7 +643,7 @@ void __init omap3_secure_sync32k_timer_init(void)
#endif /* CONFIG_ARCH_OMAP3 */
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
- defined(CONFIG_SOC_AM43XX)
+ defined(CONFIG_SOC_AM43XX) || defined(CONFIG_SOC_DRA7XX)
void __init omap3_gptimer_timer_init(void)
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -733,6 +733,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
+ DT_CLK(NULL, "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"),
DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"),
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -118,6 +118,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_OMAP_DM_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
--
2.32.0
commit 3efe7a878a11c13b5297057bfc1e5639ce1241ce upstream.
There is a timer wrap issue on dra7 for the ARM architected timer.
In a typical clock configuration the timer fails to wrap after 388 days.
To work around the issue, we need to use timer-ti-dm timers instead.
Let's prepare for adding support for percpu timers by adding a common
dmtimer_clkevt_init_common() and call it from __omap_sync32k_timer_init().
This patch makes no intentional functional changes.
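Concretely, the call site in __omap_sync32k_timer_init() (taken from the hunk
below) changes from the old helper with its defaults baked in:

	omap2_gp_clockevent_init(clkev_nr, clkev_src, clkev_prop);

to the parameterized helper plus an explicit registration, so the follow-up
patch can reuse the helper for per-CPU clockevent devices:

	dmtimer_clkevt_init_common(&clockevent, clkev_nr, clkev_src,
				   CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
				   cpu_possible_mask, clkev_prop, 300, "clockevent");
	clockevents_config_and_register(&clockevent.dev, clockevent.timer.rate,
					3, 0xffffffff);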
Cc: Daniel Lezcano <[email protected]>
Cc: Keerthy <[email protected]>
Cc: Tero Kristo <[email protected]>
[[email protected]: backported to 4.19.y]
Signed-off-by: Tony Lindgren <[email protected]>
---
arch/arm/mach-omap2/timer.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -368,18 +368,21 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-static void __init omap2_gp_clockevent_init(int gptimer_id,
- const char *fck_source,
- const char *property)
+static void __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
+ int gptimer_id,
+ const char *fck_source,
+ unsigned int features,
+ const struct cpumask *cpumask,
+ const char *property,
+ int rating, const char *name)
{
- struct dmtimer_clockevent *clkevt = &clockevent;
struct omap_dm_timer *timer = &clkevt->timer;
int res;
timer->id = gptimer_id;
timer->errata = omap_dm_timer_get_errata();
- clkevt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- clkevt->dev.rating = 300;
+ clkevt->dev.features = features;
+ clkevt->dev.rating = rating;
clkevt->dev.set_next_event = omap2_gp_timer_set_next_event;
clkevt->dev.set_state_shutdown = omap2_gp_timer_shutdown;
clkevt->dev.set_state_periodic = omap2_gp_timer_set_periodic;
@@ -397,19 +400,15 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
&clkevt->dev.name, OMAP_TIMER_POSTED);
BUG_ON(res);
- clkevt->dev.cpumask = cpu_possible_mask;
+ clkevt->dev.cpumask = cpumask;
clkevt->dev.irq = omap_dm_timer_get_irq(timer);
- if (request_irq(timer->irq, omap2_gp_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "gp_timer", clkevt))
- pr_err("Failed to request irq %d (gp_timer)\n", timer->irq);
+ if (request_irq(clkevt->dev.irq, omap2_gp_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, name, clkevt))
+ pr_err("Failed to request irq %d (gp_timer)\n", clkevt->dev.irq);
__omap_dm_timer_int_enable(timer, OMAP_TIMER_INT_OVERFLOW);
- clockevents_config_and_register(&clkevt->dev, timer->rate,
- 3, /* Timer internal resynch latency */
- 0xffffffff);
-
if (soc_is_am33xx() || soc_is_am43xx()) {
clkevt->dev.suspend = omap_clkevt_idle;
clkevt->dev.resume = omap_clkevt_unidle;
@@ -559,7 +558,12 @@ static void __init __omap_sync32k_timer_init(int clkev_nr, const char *clkev_src
{
omap_clk_init();
omap_dmtimer_init();
- omap2_gp_clockevent_init(clkev_nr, clkev_src, clkev_prop);
+ dmtimer_clkevt_init_common(&clockevent, clkev_nr, clkev_src,
+ CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ cpu_possible_mask, clkev_prop, 300, "clockevent");
+ clockevents_config_and_register(&clockevent.dev, clockevent.timer.rate,
+ 3, /* Timer internal resynch latency */
+ 0xffffffff);
/* Enable the use of clocksource="gp_timer" kernel parameter */
if (use_gptimer_clksrc || gptimer)
--
2.32.0
On Fri, Jul 09, 2021 at 10:37:42AM +0300, Tony Lindgren wrote:
> From: afzal mohammed <[email protected]>
>
> commit b75ca5217743e4d7076cf65e044e88389e44318d upstream.
>
> request_irq() is preferred over setup_irq(). Invocations of setup_irq()
> occur after memory allocators are ready.
>
> Per tglx[1], setup_irq() existed in olden days when allocators were not
> ready by the time early interrupts were initialized.
>
> Hence replace setup_irq() by request_irq().
>
> [1] https://lkml.kernel.org/r/alpine.DEB.2.20.1710191609480.1971@nanos
>
All now queued up, thanks.
greg k-h