struct irq_desc still has
unsigned int irq_count;
A timer interrupt occurs 1000 times per second and there are 86400 seconds
in a day. There is a counter for the local timer interrupt that needs to
be continually incremented.
So the counter will overflow in
2^32 / 1000 / 86400 = 46 days
Diagnostic tools will be surprised by the counters suddenly going back to
zero. There may be other interrupt sources that also occur quite often.
Is this the way it's intended or should the counters be expanded to 64 bit?
64 bit would last for our lifetime.
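A quick standalone C sketch of that arithmetic and of the wrap itself (illustrative only, not part of any patch in this thread):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A 32-bit counter incremented 1000 times per second wraps
	 * after 2^32 / 1000 seconds, i.e. roughly 49.7 days. */
	double seconds_to_wrap = 4294967296.0 / 1000.0;
	printf("wraps after %.1f days\n", seconds_to_wrap / 86400.0);

	uint32_t count = UINT32_MAX;	/* value just before the wrap */
	count++;			/* one more timer interrupt */
	printf("counter after wrap: %u\n", count);	/* prints 0 */
	return 0;
}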
On Fri, 3 Oct 2014, Christoph Lameter wrote:
The answer is simply that nobody cared so far.
> Is this the way it's intended or should the counters be expanded to 64 bit?
There is no reason why we cannot or should not expand them.
Thanks,
tglx
On Fri, 3 Oct 2014, Thomas Gleixner wrote:
> > Is this the way it's intended or should the counters be expanded to 64 bit?
>
> There is no reason why we cannot or should not expand them.
Ok here is a patch to do just that:
Subject: Increase irq counters to 64 bit
Irq counters can overflow easily if they are just 32 bit.
For example, the timer interrupt occurs 1000 times per second, so
it is predictable that the timer interrupt counter will overflow in
2^32 / 1000 [interrupts per second] / 86400 [seconds in a day]
which results in 46 days.
Other irq counters for devices may wrap even faster, for example
those for high speed networking devices.
This patch avoids the counter overflow by increasing
the counters to 64 bit.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux/arch/x86/include/asm/processor.h
===================================================================
--- linux.orig/arch/x86/include/asm/processor.h
+++ linux/arch/x86/include/asm/processor.h
@@ -432,7 +432,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_un
DECLARE_INIT_PER_CPU(irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(unsigned long, irq_count);
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
Index: linux/arch/x86/kernel/cpu/common.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/common.c
+++ linux/arch/x86/kernel/cpu/common.c
@@ -1144,7 +1144,7 @@ EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(unsigned long, irq_count) __visible = -1;
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
Index: linux/include/linux/irqdesc.h
===================================================================
--- linux.orig/include/linux/irqdesc.h
+++ linux/include/linux/irqdesc.h
@@ -41,7 +41,7 @@ struct irq_desc;
*/
struct irq_desc {
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ unsigned long __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
@@ -51,7 +51,7 @@ struct irq_desc {
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
- unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
atomic_t threads_handled;
Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h
+++ linux/include/linux/kernel_stat.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, k
extern unsigned long long nr_context_switches(void);
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+extern unsigned long kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
Index: linux/kernel/irq/debug.h
===================================================================
--- linux.orig/kernel/irq/debug.h
+++ linux/kernel/irq/debug.h
@@ -11,7 +11,7 @@
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
- printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+ printk("irq %d, desc: %p, depth: %d, count: %lu, unhandled: %d\n",
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
Index: linux/kernel/irq/irqdesc.c
===================================================================
--- linux.orig/kernel/irq/irqdesc.c
+++ linux/kernel/irq/irqdesc.c
@@ -532,7 +532,7 @@ void kstat_incr_irq_this_cpu(unsigned in
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+unsigned long kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
Index: linux/kernel/irq/proc.c
===================================================================
--- linux.orig/kernel/irq/proc.c
+++ linux/kernel/irq/proc.c
@@ -248,7 +248,7 @@ static int irq_spurious_proc_show(struct
{
struct irq_desc *desc = irq_to_desc((long) m->private);
- seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
+ seq_printf(m, "count %lu\n" "unhandled %u\n" "last_unhandled %u ms\n",
desc->irq_count, desc->irqs_unhandled,
jiffies_to_msecs(desc->last_unhandled));
return 0;
@@ -450,7 +450,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10lu ", kstat_irqs_cpu(i, j));
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
On Fri, Oct 03, 2014 at 06:54:22AM -0500, Christoph Lameter wrote:
> Subject: Increase irq counters to 64 bit
...
> Index: linux/arch/x86/include/asm/processor.h
> ===================================================================
> --- linux.orig/arch/x86/include/asm/processor.h
> +++ linux/arch/x86/include/asm/processor.h
> @@ -432,7 +432,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_un
> DECLARE_INIT_PER_CPU(irq_stack_union);
>
> DECLARE_PER_CPU(char *, irq_stack_ptr);
> -DECLARE_PER_CPU(unsigned int, irq_count);
> +DECLARE_PER_CPU(unsigned long, irq_count);
Still 32 bit on 32 bit machines...
Thanks,
Richard
On Fri, 3 Oct 2014, Richard Cochran wrote:
> > DECLARE_PER_CPU(char *, irq_stack_ptr);
> > -DECLARE_PER_CPU(unsigned int, irq_count);
> > +DECLARE_PER_CPU(unsigned long, irq_count);
>
> Still 32 bit on 32 bit machines...
64 bit counters on 32 bit machines are not an easy thing and could be
expensive to handle in particular because these counters are used in
performance critical hotpaths.
I thought I better leave it alone on 32 bit.
On Fri, 2014-10-03 at 06:54 -0500, Christoph Lameter wrote:
> On Fri, 3 Oct 2014, Thomas Gleixner wrote:
>
> > > Is this the way it's intended or should the counters be expanded to 64 bit?
> >
> > There is no reason why we cannot or should not expand them.
>
> Ok here is a patch to do just that:
>
>
> Subject: Increase irq counters to 64 bit
>
>
> Irq counters can overflow easily if they are just 32 bit.
>
> For example, the timer interrupt occurs 1000 times per second, so
> it is predictable that the timer interrupt counter will overflow in
>
>
> 2^32 / 1000 [interrupts per second] / 86400 [seconds in a day]
>
> which results in 46 days.
dc -e "1 k 2 32 ^ 1000 / 86400 / p"
49.7
(That was the number I remembered from stories about an ancient Windows
lockup.)
> Other irq counters for devices may wrap even faster, for example
> those for high speed networking devices.
>
> This patch avoids the counter overflow by increasing
> the counters to 64 bit.
>
> Signed-off-by: Christoph Lameter <[email protected]>
Paul Bolle
On Fri, Oct 03, 2014 at 07:07:46AM -0500, Christoph Lameter wrote:
> I thought I better leave it alone on 32 bit.
Fine, but the subject line sounded different.
Thanks,
Richard
On Fri, 3 Oct 2014, Paul Bolle wrote:
> dc -e "1 k 2 32 ^ 1000 / 86400 / p"
> 49.7
>
> (That was the number I remembered from stories about an ancient Windows
> lockup.)
Well yes, I used bc, which discards the remainder on integer divides.
> Fine, but the subject line sounded different.
Hmmm.... Found a bug. We need this patch on top:
Subject: Fix allocpercpu
Must allocate unsigned long, not int.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux/kernel/irq/irqdesc.c
===================================================================
--- linux.orig/kernel/irq/irqdesc.c
+++ linux/kernel/irq/irqdesc.c
@@ -140,7 +140,7 @@ static struct irq_desc *alloc_desc(int i
if (!desc)
return NULL;
/* allocate based on nr_cpu_ids */
- desc->kstat_irqs = alloc_percpu(unsigned int);
+ desc->kstat_irqs = alloc_percpu(unsigned long);
if (!desc->kstat_irqs)
goto err_desc;
On Fri, 2014-10-03 at 07:23 -0500, Christoph Lameter wrote:
> On Fri, 3 Oct 2014, Paul Bolle wrote:
>
> > dc -e "1 k 2 32 ^ 1000 / 86400 / p"
> > 49.7
> >
> > (That was the number I remembered from stories about an ancient Windows
> > lockup.)
>
> Well yes, I used bc, which discards the remainder on integer divides.
Use `bc -l` ;-)
---- snip ----
2^32 / 1000 / 86400
49.71026962962962962962
---- snip ----
Bernd
--
"I dislike type abstraction if it has no real reason. And saving
on typing is not a good reason - if your typing speed is the main
issue when you're coding, you're doing something seriously wrong."
- Linus Torvalds
On Fri, 3 Oct 2014, Christoph Lameter wrote:
> On Fri, 3 Oct 2014, Richard Cochran wrote:
>
> > > DECLARE_PER_CPU(char *, irq_stack_ptr);
> > > -DECLARE_PER_CPU(unsigned int, irq_count);
> > > +DECLARE_PER_CPU(unsigned long, irq_count);
> >
> > Still 32 bit on 32 bit machines...
>
> 64 bit counters on 32 bit machines are not an easy thing and could be
What's so hard about 64bit counters on 32bit machines?
> expensive to handle in particular because these counters are used in
> performance critical hotpaths.
The expensive overhead is a single "adcl" instruction.
> I thought I better leave it alone on 32 bit.
And how exactly are we supposed to explain the different behaviour to
users?
Thanks,
tglx
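To make the cost concrete, here is a small illustrative fragment (the asm in the comment is what GCC typically emits for i386; exact output varies by compiler and flags):

#include <stdint.h>

uint64_t counter;

void irq_tick(void)
{
	/* On i386 this 64-bit increment typically compiles to an
	 * add/add-with-carry pair:
	 *	addl $1, counter
	 *	adcl $0, counter+4
	 * The "adcl" propagating the carry into the high word is
	 * the single extra instruction referred to above. */
	counter++;
}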
Subject: Increase irq counters to 64 bit V2
V1->V2
- Wrong size at percpu alloc.
- Use u64 so that this will also work on 32 bit machines
Irq counters can overflow easily if they are just 32 bit.
For example, the timer interrupt occurs 1000 times per second, so
it is predictable that the timer interrupt counter will overflow in
2^32 / 1000 [interrupts per second] / 86400 [seconds in a day]
which results in 46 days.
Other irq counters for devices may wrap even faster, for example
those for high speed networking devices.
This patch avoids the counter overflow by increasing
the counters to 64 bit.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux/arch/x86/include/asm/processor.h
===================================================================
--- linux.orig/arch/x86/include/asm/processor.h
+++ linux/arch/x86/include/asm/processor.h
@@ -432,7 +432,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_un
DECLARE_INIT_PER_CPU(irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(u64, irq_count);
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
Index: linux/arch/x86/kernel/cpu/common.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/common.c
+++ linux/arch/x86/kernel/cpu/common.c
@@ -1144,7 +1144,7 @@ EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(u64, irq_count) __visible = -1;
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
Index: linux/include/linux/irqdesc.h
===================================================================
--- linux.orig/include/linux/irqdesc.h
+++ linux/include/linux/irqdesc.h
@@ -41,7 +41,7 @@ struct irq_desc;
*/
struct irq_desc {
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ u64 __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
@@ -51,7 +51,7 @@ struct irq_desc {
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
- unsigned int irq_count; /* For detecting broken IRQs */
+ u64 irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
atomic_t threads_handled;
Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h
+++ linux/include/linux/kernel_stat.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, k
extern unsigned long long nr_context_switches(void);
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+extern u64 kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
Index: linux/kernel/irq/debug.h
===================================================================
--- linux.orig/kernel/irq/debug.h
+++ linux/kernel/irq/debug.h
@@ -11,7 +11,7 @@
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
- printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+ printk("irq %d, desc: %p, depth: %d, count: %llu, unhandled: %d\n",
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
Index: linux/kernel/irq/irqdesc.c
===================================================================
--- linux.orig/kernel/irq/irqdesc.c
+++ linux/kernel/irq/irqdesc.c
@@ -140,7 +140,7 @@ static struct irq_desc *alloc_desc(int i
if (!desc)
return NULL;
/* allocate based on nr_cpu_ids */
- desc->kstat_irqs = alloc_percpu(unsigned int);
+ desc->kstat_irqs = alloc_percpu(u64);
if (!desc->kstat_irqs)
goto err_desc;
@@ -532,7 +532,7 @@ void kstat_incr_irq_this_cpu(unsigned in
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+u64 kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
Index: linux/kernel/irq/proc.c
===================================================================
--- linux.orig/kernel/irq/proc.c
+++ linux/kernel/irq/proc.c
@@ -248,7 +248,7 @@ static int irq_spurious_proc_show(struct
{
struct irq_desc *desc = irq_to_desc((long) m->private);
- seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
+ seq_printf(m, "count %llu\n" "unhandled %u\n" "last_unhandled %u ms\n",
desc->irq_count, desc->irqs_unhandled,
jiffies_to_msecs(desc->last_unhandled));
return 0;
@@ -450,7 +450,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10llu ", kstat_irqs_cpu(i, j));
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
On Sun, 2014-10-05 at 18:24 -0500, Christoph Lameter wrote:
> Subject: Increase irq counters to 64 bit V2
> -unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
> +u64 kstat_irqs_cpu(unsigned int irq, int cpu)
> {
> struct irq_desc *desc = irq_to_desc(irq);
Well, this cannot safely work on 32bit hosts.
Hint : include/linux/u64_stats_sync.h
Or simply use 'unsigned long' as you did in your first patch;
really, I don't feel 32bit hosts should care about this.
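For context, u64_stats_sync wraps counter updates in a seqcount so that a 32-bit reader can detect a torn read and retry, while compiling away entirely on 64-bit. A rough sketch of the usage pattern (the struct and function names here are made up for illustration):

#include <linux/u64_stats_sync.h>

struct irq_cpu_stat {
	u64 count;
	struct u64_stats_sync syncp;	/* empty struct on 64-bit */
};

/* writer side, hot path */
static inline void irq_stat_inc(struct irq_cpu_stat *s)
{
	u64_stats_update_begin(&s->syncp);
	s->count++;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry if a writer raced with the fetch */
static inline u64 irq_stat_read(struct irq_cpu_stat *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->count;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}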
On Sun, 2014-10-05 at 23:49 +0200, Thomas Gleixner wrote:
> What's so hard about 64bit counters on 32bit machines?
Not hard, but not trivial either.
>
> > expensive to handle in particular because these counters are used in
> > performance critical hotpaths.
>
> The expensive overhead is a single "adcl" instruction.
>
Assuming a reader does not care about reading garbage, yes, while the carry
is not yet propagated.
On Sun, 5 Oct 2014, Eric Dumazet wrote:
> On Sun, 2014-10-05 at 23:49 +0200, Thomas Gleixner wrote:
>
> > What's so hard about 64bit counters on 32bit machines?
>
> Not hard, but not trivial either.
>
> >
> > > expensive to handle in particular because these counters are used in
> > > performance critical hotpaths.
> >
> > The expensive overhead is a single "adcl" instruction.
> >
>
> Assuming a reader does not care about reading garbage, yes, while the carry
> is not yet propagated.
Readers and writers are serialized via desc->lock.
Thanks,
tglx
On Sun, 5 Oct 2014, Christoph Lameter wrote:
> -DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
> +DEFINE_PER_CPU(u64, irq_count) __visible = -1;
What's the point of this? irq_count is solely used to figure out
whether interrupts are nested, i.e. whether we need to switch the
stack on x86_64 or not. See the usage sites in arch/x86/kernel/entry_64.S
> @@ -51,7 +51,7 @@ struct irq_desc {
> unsigned int core_internal_state__do_not_mess_with_it;
> unsigned int depth; /* nested irq disables */
> unsigned int wake_depth; /* nested wake enables */
> - unsigned int irq_count; /* For detecting broken IRQs */
> + u64 irq_count; /* For detecting broken IRQs */
This is pointless as this count is solely used for the spurious
detector and reset to 0 when irq_count reaches 100000. See
note_interrupt().
> -extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
> +extern u64 kstat_irqs_cpu(unsigned int irq, int cpu);
Care to fixup the other call sites of this as well? git grep is your friend.
Thanks,
tglx
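For reference, the detector logic in question looks roughly like this (a simplified paraphrase of note_interrupt() in kernel/irq/spurious.c, not a verbatim excerpt):

/* irq_count never exceeds 100000 before being reset, so
 * widening it to 64 bit changes nothing. */
desc->irq_count++;
if (likely(desc->irq_count < 100000))
	return;

desc->irq_count = 0;
if (unlikely(desc->irqs_unhandled > 99900)) {
	/* Nearly all of the last 100000 interrupts were
	 * unhandled: report the IRQ as broken and disable it. */
	__report_bad_irq(irq, desc, action_ret);
	irq_disable(desc);
}
desc->irqs_unhandled = 0;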
Subject: Increase irq counters to 64 bit V3
V2->V3
- Remove useless counter changes
- Track use cases for kstat_irqs_cpu
- Verify it works on 32 bit.
V1->V2
- Wrong size at percpu alloc.
- Use u64 so that this will also work on 32 bit machines
Irq counters can overflow easily if they are just 32 bit.
For example, the timer interrupt occurs 1000 times per second, so
it is predictable that the timer interrupt counter will overflow in
2^32 / 1000 [interrupts per second] / 86400 [seconds in a day]
which results in approximately 50 days.
Other irq counters for devices may wrap even faster, for example
those for high speed networking devices.
This patch avoids the counter overflow by increasing
the counters to 64 bit.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux/include/linux/irqdesc.h
===================================================================
--- linux.orig/include/linux/irqdesc.h
+++ linux/include/linux/irqdesc.h
@@ -41,7 +41,7 @@ struct irq_desc;
*/
struct irq_desc {
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ u64 __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h
+++ linux/include/linux/kernel_stat.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, k
extern unsigned long long nr_context_switches(void);
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+extern u64 kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
Index: linux/kernel/irq/irqdesc.c
===================================================================
--- linux.orig/kernel/irq/irqdesc.c
+++ linux/kernel/irq/irqdesc.c
@@ -140,7 +140,7 @@ static struct irq_desc *alloc_desc(int i
if (!desc)
return NULL;
/* allocate based on nr_cpu_ids */
- desc->kstat_irqs = alloc_percpu(unsigned int);
+ desc->kstat_irqs = alloc_percpu(u64);
if (!desc->kstat_irqs)
goto err_desc;
@@ -532,7 +532,7 @@ void kstat_incr_irq_this_cpu(unsigned in
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+u64 kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
Index: linux/kernel/irq/proc.c
===================================================================
--- linux.orig/kernel/irq/proc.c
+++ linux/kernel/irq/proc.c
@@ -450,7 +450,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10llu ", kstat_irqs_cpu(i, j));
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
Index: linux/arch/m68k/sun3/sun3ints.c
===================================================================
--- linux.orig/arch/m68k/sun3/sun3ints.c
+++ linux/arch/m68k/sun3/sun3ints.c
@@ -51,7 +51,7 @@ void sun3_disable_irq(unsigned int irq)
static irqreturn_t sun3_int7(int irq, void *dev_id)
{
- unsigned int cnt;
+ u64 cnt;
cnt = kstat_irqs_cpu(irq, 0);
if (!(cnt % 2000))
Index: linux/arch/parisc/kernel/irq.c
===================================================================
--- linux.orig/arch/parisc/kernel/irq.c
+++ linux/arch/parisc/kernel/irq.c
@@ -222,7 +222,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10llu ", kstat_irqs_cpu(i, j));
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
Index: linux/arch/s390/kernel/irq.c
===================================================================
--- linux.orig/arch/s390/kernel/irq.c
+++ linux/arch/s390/kernel/irq.c
@@ -136,7 +136,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, "%s: ", irqclass_main_desc[index].name);
irq = irqclass_main_desc[index].irq;
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+ seq_printf(p, "%10llu ", kstat_irqs_cpu(irq, cpu));
seq_putc(p, '\n');
goto out;
}
On Sun, 5 Oct 2014, Thomas Gleixner wrote:
>>
>> 64 bit counters on 32 bit machines are not an easy thing and could be
>
> What's so hard about 64bit counters on 32bit machines?
>
>> expensive to handle in particular because these counters are used in
>> performance critical hotpaths.
>
> The expensive overhead is a single "adcl" instruction.
As I understand it, since the 64 bit math cannot be made atomic, it requires
protecting the counter with a lock so that it can't be read while half updated.
Acquiring a lock on every update is an expensive thing to do. It's not something
people like to see in a fast path, especially for something of as low
importance as the counters.
David Lang
On Mon, 6 Oct 2014, David Lang wrote:
> On Sun, 5 Oct 2014, Thomas Gleixner wrote:
>
> > >
> > > 64 bit counters on 32 bit machines are not an easy thing and could be
> >
> > What's so hard about 64bit counters on 32bit machines?
> >
> > > expensive to handle in particular because these counters are used in
> > > performance critical hotpaths.
> >
> > The expensive overhead is a single "adcl" instruction.
>
> As I understand it, since the 64 bit math cannot be made atomic, it requires
> protecting the counter with a lock so that it can't be read while half
> updated. Acquiring a lock on every update is an expensive thing to do. It's not
> something people like to see in a fast path, especially for something of as
> low importance as the counters.
As I said before: both reader and writer side are already protected by
the irq descriptor lock. We take that lock anyway.
Thanks,
tglx
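Sketched out, the serialization pattern being described is roughly the following (illustrative, not the literal kernel code; the increment actually happens inside the flow handlers, which already run under desc->lock):

/* writer: desc->lock is held when the per-cpu count is bumped,
 * so a 64-bit add can never be observed half done */
raw_spin_lock(&desc->lock);
__this_cpu_inc(*desc->kstat_irqs);
raw_spin_unlock(&desc->lock);

/* reader, e.g. while generating /proc/interrupts */
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(cpu)
	total += *per_cpu_ptr(desc->kstat_irqs, cpu);
raw_spin_unlock_irqrestore(&desc->lock, flags);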
On Mon, 6 Oct 2014, Christoph Lameter wrote:
> For example, the timer interrupt occurs 1000 times per second, so
> it is predictable that the timer interrupt counter will overflow in
Now the bad news is that the timer interrupt, if it is serviced via
the local timer interrupt, is still using a 32bit counter because the
local timer interrupt does not go through the core interrupt code.
So if you want to fix that as well, you really need to think about the
32 bit case because there is no serialization for the interrupts which
are delivered directly from their own vector. And no, we should not
diverge 32 and 64 bit artificially here simply because the same 50
days wrap applies to both.
I really start to wonder whether all this is worth the trouble. It has
been this way forever and 1k timer interrupts per second is not really
a new thing either. So we did not change anything which suddenly makes
tools confused.
Thanks,
tglx
On Mon, 6 Oct 2014, Thomas Gleixner wrote:
> So if you want to fix that as well, you really need to think about the
> 32 bit case because there is no serialization for the interrupts which
> are delivered directly from their own vector. And no, we should not
> diverge 32 and 64 bit artificially here simply because the same 50
> days wrap applies to both.
Is it a divergence if both 64bit and 32 bit are using unsigned long?
>
> I really start to wonder whether all this is worth the trouble. It has
> been this way forever and 1k timer interrupts per second is not really
> a new thing either. So we did not change anything which suddenly makes
> tools confused.
Tools expect the number of interrupts to increase linearly and not jump by
2^32 once in a while. There are functions in the kernel (/proc/stat) that
sum up various interrupt counters and that are typed unsigned long. These
larger numbers can suddenly jump by 2^32. It's pretty unusual for a 64 bit
counter to do that and it required some head scratching until we figured
that one out.
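To make that failure mode concrete, a tiny standalone illustration (hypothetical numbers) of a 64-bit aggregate going backwards by ~2^32 when one 32-bit per-cpu counter wraps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* one CPU's 32-bit counter just before and just after it
	 * wraps from 4294967290 to 4 */
	uint32_t cpu0_before = 4294967290u, cpu0_after = 4u;
	uint64_t rest = 12345678u;	/* the other CPUs, unchanged */

	printf("total before: %llu\n",
	       (unsigned long long)((uint64_t)cpu0_before + rest));
	printf("total after:  %llu\n",
	       (unsigned long long)((uint64_t)cpu0_after + rest));
	/* the aggregated 64-bit total just dropped by ~2^32 */
	return 0;
}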
On Mon, 6 Oct 2014, Christoph Lameter wrote:
> On Mon, 6 Oct 2014, Thomas Gleixner wrote:
>
> > So if you want to fix that as well, you really need to think about the
> > 32 bit case because there is no serialization for the interrupts which
> > are delivered directly from their own vector. And no, we should not
> > diverge 32 and 64 bit artificially here simply because the same 50
> > days wrap applies to both.
>
> Is it a divergence if both 64bit and 32 bit are using unsigned long?
Sigh, yes. Because unsigned long is 32bit on a 32bit architecture. So
the change would be a NOP for 32bit and 32bit would still suffer from
the wrap-arounds etc.
> >
> > I really start to wonder whether all this is worth the trouble. It has
> > been this way forever and 1k timer interrupts per second is not really
> > a new thing either. So we did not change anything which suddenly makes
> > tools confused.
>
> Tools expect the number of interrupts to increase linearly and not jump by
> 2^32 once in a while. There are functions in the kernel (/proc/stat) that
> sum up various interrupt counters and that are typed unsigned long. These
> larger numbers can suddenly jump by 2^32. It's pretty unusual for a 64 bit
> counter to do that and it required some head scratching until we figured
> that one out.
I understand that, I just wonder why nobody noticed before. It's been
that way forever :)
Thanks,
tglx
On Mon, 6 Oct 2014, Christoph Lameter wrote:
> On Mon, 6 Oct 2014, Thomas Gleixner wrote:
>
>> So if you want to fix that as well, you really need to think about the
>> 32 bit case because there is no serialization for the interrupts which
>> are delivered directly from their own vector. And no, we should not
>> diverge 32 and 64 bit artificially here simply because the same 50
>> days wrap applies to both.
>
> Is it a divergence if both 64bit and 32 bit are using unsigned long?
>
>>
>> I really start to wonder whether all this is worth the trouble. It has
>> been this way forever and 1k timer interrupts per second is not really
>> a new thing either. So we did not change anything which suddenly makes
>> tools confused.
>
> Tools expect the number of interrupts to increase linearly and not jump by
> 2^32 once in a while. There are functions in the kernel (/proc/stat) that
> sum up various interrupt counters and that are typed unsigned long. These
> larger numbers can suddenly jump by 2^32. It's pretty unusual for a 64 bit
> counter to do that and it required some head scratching until we figured
> that one out.
No, tools recognize that things happen (wraps, reboots, etc.) and have some
threshold such that they say "if this value changes by more than the threshold,
something happened and it's not valid to use this delta".
This has been the case for decades. If you have a monitoring tool that does not
account for this sort of thing, you have an immature tool.
David Lang
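A hedged sketch of the kind of wrap/reset handling described here, as monitoring tools commonly implement it (all names hypothetical):

#include <stdint.h>
#include <stdbool.h>

/* Delta between two samples of a 32-bit counter. Unsigned
 * subtraction absorbs a single wrap for free; a delta above the
 * plausibility threshold is treated as a counter reset (reboot,
 * driver reload, ...) and the interval is discarded. */
static bool sample_delta(uint32_t prev, uint32_t cur,
			 uint32_t max_plausible, uint32_t *delta)
{
	*delta = cur - prev;		/* modulo-2^32 arithmetic */
	if (*delta > max_plausible)
		return false;		/* reset detected: skip */
	return true;
}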
> > Tools expect the number of interrupts to increase linearly and not jump by
> > 2^32 once in a while. There are functions in the kernel (/proc/stat) that
...
> I understand that, I just wonder why nobody noticed before. It's been
> that way forever :)
Any proper tool that interfaces to snmp-like counters expects and deals with
worse: sudden counter resets.
And it has been that way forever :-) So people who use such tools would
hardly notice any wrap-arounds...
So, the question becomes: which tools are misbehaving? Maybe they should
adopt snmp-like counter reset detection for peak filtering, and we could
leave 32-bit alone and increase the counter size only for 64-bit?
--
"One disk to rule them all, One disk to find them. One disk to bring
them all and in the darkness grind them. In the Land of Redmond
where the shadows lie." -- The Silicon Valley Tarot
Henrique Holschuh
On Mon, 6 Oct 2014, David Lang wrote:
> No, tools recognize that things happen (wraps, reboots, etc.) and have some
> threshold such that they say "if this value changes by more than the threshold,
> something happened and it's not valid to use this delta".
>
> This has been the case for decades. If you have a monitoring tool that does
> not account for this sort of thing, you have an immature tool.
Well, maybe put those statements somewhere findable for those writing
diagnostic tools that use the counters.