I'm reposting this with some of the suggested changes incorporated.
This patch allows the user to migrate currently queued
standard timers from one cpu to another. Migrating
timers off of selected cpus allows those cpus to run
time-critical threads with minimal timer-induced latency
(which can reach hundreds of microseconds for a single
timer, as measured on an x86_64 test machine), thereby
improving overall determinism on those cpus.
This patch considers timers placed with add_timer_on()
to have 'cpu affinity' and does not move them, unless the
timers are being migrated off of a hotplug cpu that is
going down.
The changes in drivers/base/cpu.c provide a clean and
convenient interface for triggering the migration through
sysfs, via writing the destination cpu number to an
owner-writable file (0200 permissions) associated with the
source cpu. In addition, this functionality is available
for kernel module use.
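For example, migrating all currently queued timers off of cpu 3
and onto cpu 0 would look like this (path assuming the usual
sysfs cpu device layout):

  echo 0 > /sys/devices/system/cpu/cpu3/timer_migrate

From a module, the same migration is a single call to the
exported migrate_timers(). A minimal sketch follows; the module
boilerplate and cpu numbers here are illustrative, not part of
the patch:

  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/timer.h>

  static int __init timer_shield_init(void)
  {
          /* Move queued timers from cpu 3 to cpu 0.  The third
           * argument (cpu_down) is 0, so timers queued with
           * add_timer_on() keep their cpu affinity and are
           * skipped. */
          int rc = migrate_timers(0, 3, 0);

          if (rc < 0)
                  printk(KERN_WARNING
                         "migrate_timers failed: %d\n", rc);
          return rc;
  }

  static void __exit timer_shield_exit(void)
  {
  }

  module_init(timer_shield_init);
  module_exit(timer_shield_exit);
  MODULE_LICENSE("GPL");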
Note that migrating timers will not, by itself, keep new
timers off of the chosen cpu. But with careful control of
thread affinity, one can control the affinity of new timers
and keep timer-induced latencies off of the chosen cpu.
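As a rough illustration of that (userspace, with hypothetical
cpu numbers): since add_timer() queues onto the timer base of
the cpu it is called from, pinning everything except the
time-critical work away from the shielded cpu tends to keep new
timers away from it as well:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
          cpu_set_t set;

          /* Confine this thread (and anything it spawns) to
           * cpu 0, so timers armed on its behalf are queued
           * there rather than on the shielded cpu. */
          CPU_ZERO(&set);
          CPU_SET(0, &set);
          if (sched_setaffinity(0, sizeof(set), &set) < 0) {
                  perror("sched_setaffinity");
                  return 1;
          }
          /* ... run the non-critical workload ... */
          return 0;
  }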
This particular patch does not affect hrtimers; those
could be addressed later.
Signed-off-by: Dimitri Sivanich <[email protected]>
Index: linux/kernel/timer.c
===================================================================
--- linux.orig/kernel/timer.c
+++ linux/kernel/timer.c
@@ -147,6 +147,7 @@ void fastcall init_timer(struct timer_li
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
+ timer->is_bound_to_cpu = 0;
}
EXPORT_SYMBOL(init_timer);
@@ -250,6 +251,7 @@ void add_timer_on(struct timer_list *tim
BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer->base = base;
+ timer->is_bound_to_cpu = 1; /* Don't migrate if cpu online */
internal_add_timer(base, timer);
spin_unlock_irqrestore(&base->lock, flags);
}
@@ -1616,50 +1618,65 @@ static int __devinit init_timers_cpu(int
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
+static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head, int cpu_down)
{
- struct timer_list *timer;
+ struct timer_list *timer, *t;
- while (!list_empty(head)) {
- timer = list_entry(head->next, struct timer_list, entry);
+ list_for_each_entry_safe(timer, t, head, entry) {
+ if (!cpu_down && timer->is_bound_to_cpu)
+ continue;
detach_timer(timer, 0);
timer->base = new_base;
internal_add_timer(new_base, timer);
}
}
-static void __devinit migrate_timers(int cpu)
+int migrate_timers(int dest_cpu, int source_cpu, int cpu_down)
{
tvec_base_t *old_base;
tvec_base_t *new_base;
+ spinlock_t *lock1, *lock2;
+ unsigned long flags;
int i;
- BUG_ON(cpu_online(cpu));
- old_base = per_cpu(tvec_bases, cpu);
- new_base = get_cpu_var(tvec_bases);
-
- local_irq_disable();
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ if (source_cpu == dest_cpu)
+ return -EINVAL;
+
+ if (!cpu_online(dest_cpu))
+ return -EINVAL;
+
+ if (cpu_down)
+ BUG_ON(cpu_online(source_cpu));
+ else if (!cpu_online(source_cpu))
+ return -EINVAL;
+
+ old_base = per_cpu(tvec_bases, source_cpu);
+ new_base = per_cpu(tvec_bases, dest_cpu);
- BUG_ON(old_base->running_timer);
+ /* Order locking based on relative cpu number */
+ lock1 = dest_cpu > source_cpu ? &old_base->lock : &new_base->lock;
+ lock2 = dest_cpu > source_cpu ? &new_base->lock : &old_base->lock;
+
+ spin_lock_irqsave(lock1, flags);
+ spin_lock(lock2);
+
+ if (cpu_down)
+ BUG_ON(old_base->running_timer);
for (i = 0; i < TVR_SIZE; i++)
- migrate_timer_list(new_base, old_base->tv1.vec + i);
+ migrate_timer_list(new_base, old_base->tv1.vec + i, cpu_down);
for (i = 0; i < TVN_SIZE; i++) {
- migrate_timer_list(new_base, old_base->tv2.vec + i);
- migrate_timer_list(new_base, old_base->tv3.vec + i);
- migrate_timer_list(new_base, old_base->tv4.vec + i);
- migrate_timer_list(new_base, old_base->tv5.vec + i);
+ migrate_timer_list(new_base, old_base->tv2.vec + i, cpu_down);
+ migrate_timer_list(new_base, old_base->tv3.vec + i, cpu_down);
+ migrate_timer_list(new_base, old_base->tv4.vec + i, cpu_down);
+ migrate_timer_list(new_base, old_base->tv5.vec + i, cpu_down);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
- local_irq_enable();
- put_cpu_var(tvec_bases);
+ spin_unlock(lock2);
+ spin_unlock_irqrestore(lock1, flags);
+ return 0;
}
-#endif /* CONFIG_HOTPLUG_CPU */
+EXPORT_SYMBOL_GPL(migrate_timers);
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -1672,7 +1689,7 @@ static int __cpuinit timer_cpu_notify(st
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
- migrate_timers(cpu);
+ migrate_timers(smp_processor_id(), cpu, 1);
break;
#endif
default:
Index: linux/drivers/base/cpu.c
===================================================================
--- linux.orig/drivers/base/cpu.c
+++ linux/drivers/base/cpu.c
@@ -54,6 +54,25 @@ static ssize_t store_online(struct sys_d
}
static SYSDEV_ATTR(online, 0600, show_online, store_online);
+static ssize_t store_timer_migrate(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ unsigned int source_cpu = dev->id;
+ unsigned long dest_cpu;
+ int rc;
+
+ dest_cpu = simple_strtoul(buf, NULL, 10);
+ if (dest_cpu > INT_MAX)
+ return -EINVAL;
+
+ rc = migrate_timers(dest_cpu, source_cpu, 0);
+ if (rc < 0)
+ return rc;
+ else
+ return count;
+}
+static SYSDEV_ATTR(timer_migrate, 0200, NULL, store_timer_migrate);
+
static void __devinit register_cpu_control(struct cpu *cpu)
{
sysdev_create_file(&cpu->sysdev, &attr_online);
@@ -62,6 +81,8 @@ void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->sysdev.id;
+ sysdev_remove_file(&cpu->sysdev, &attr_timer_migrate);
+
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
sysdev_remove_file(&cpu->sysdev, &attr_online);
@@ -124,6 +145,8 @@ int __devinit register_cpu(struct cpu *c
cpu_sys_devices[num] = &cpu->sysdev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
+ if (!error)
+ sysdev_create_file(&cpu->sysdev, &attr_timer_migrate);
#ifdef CONFIG_KEXEC
if (!error)
Index: linux/include/linux/timer.h
===================================================================
--- linux.orig/include/linux/timer.h
+++ linux/include/linux/timer.h
@@ -15,6 +15,8 @@ struct timer_list {
unsigned long data;
struct tvec_t_base_s *base;
+
+ char is_bound_to_cpu;
};
extern struct tvec_t_base_s boot_tvec_bases;
@@ -24,6 +26,7 @@ extern struct tvec_t_base_s boot_tvec_ba
.expires = (_expires), \
.data = (_data), \
.base = &boot_tvec_bases, \
+ .is_bound_to_cpu = 0, \
}
#define DEFINE_TIMER(_name, _function, _expires, _data) \
@@ -95,6 +98,7 @@ static inline void add_timer(struct time
extern void init_timers(void);
extern void run_local_timers(void);
+extern int migrate_timers(int dest_cpu, int source_cpu, int cpu_down);
struct hrtimer;
extern int it_real_fn(struct hrtimer *);
On Tue, 2006-09-19 at 10:29 -0500, Dimitri Sivanich wrote:
> This patch allows the user to migrate currently queued
> standard timers from one cpu to another. Migrating
> timers off of selected cpus allows those cpus to run
> time-critical threads with minimal timer-induced latency
> (which can reach hundreds of microseconds for a single
> timer, as measured on an x86_64 test machine), thereby
> improving overall determinism on those cpus.
Which driver or subsystem is doing 100s of usecs of work in a timer?
Shouldn't another mechanism like a workqueue be used instead?
Lee
On Tue, Sep 19, 2006 at 12:33:37PM -0400, Lee Revell wrote:
> Which driver or subsystem is doing 100s of usecs of work in a timer?
The longest one I've captured so far results from:
        rsp                 rip         Function (args)
======================= <nmi>
0xffff810257822fd8 0xffffffff803a0e94 rt_check_expire+0x8c
======================= <interrupt>
0xffff81025781fee8 0xffffffff803a0e08 rt_check_expire
0xffff81025781ff08 0xffffffff802386b3 run_timer_softirq+0x133
0xffff81025781ff38 0xffffffff80235262 __do_softirq+0x5e
0xffff81025781ff68 0xffffffff8020a958 call_softirq+0x1c
0xffff81025781ff80 0xffffffff8020bea7 do_softirq+0x2c
0xffff81025781ff90 0xffffffff80235142 irq_exit+0x48
> Shouldn't another mechanism like a workqueue be used instead?
Not quite sure what you're asking here.
On Tue, 2006-09-19 at 11:41 -0500, Dimitri Sivanich wrote:
> On Tue, Sep 19, 2006 at 12:33:37PM -0400, Lee Revell wrote:
> > Which driver or subsystem is doing 100s of usecs of work in a timer?
>
> The longest one I've captured so far results from:
>
>         rsp                 rip         Function (args)
> ======================= <nmi>
> 0xffff810257822fd8 0xffffffff803a0e94 rt_check_expire+0x8c
> ======================= <interrupt>
> 0xffff81025781fee8 0xffffffff803a0e08 rt_check_expire
> 0xffff81025781ff08 0xffffffff802386b3 run_timer_softirq+0x133
> 0xffff81025781ff38 0xffffffff80235262 __do_softirq+0x5e
> 0xffff81025781ff68 0xffffffff8020a958 call_softirq+0x1c
> 0xffff81025781ff80 0xffffffff8020bea7 do_softirq+0x2c
> 0xffff81025781ff90 0xffffffff80235142 irq_exit+0x48
>
Ah, I remember that one. Eric Dumazet had some suggestions to fix it
6-12 months ago, but they never went anywhere; the thread was called
"RCU latency regression in 2.6.16-rc1".
That one is especially annoying as there's no workaround (shrinking the
route cache or reducing the GC interval via the net.ipv4.route.* sysctls
has no effect).
Lee
On Tue, 19 Sep 2006 10:29:42 -0500
Dimitri Sivanich <[email protected]> wrote:
> This patch allows the user to migrate currently queued
> standard timers from one cpu to another. Migrating
> timers off of selected cpus allows those cpus to run
> time-critical threads with minimal timer-induced latency
> (which can reach hundreds of microseconds for a single
> timer, as measured on an x86_64 test machine), thereby
> improving overall determinism on those cpus.
>
> This patch considers timers placed with add_timer_on()
> to have 'cpu affinity' and does not move them, unless the
> timers are being migrated off of a hotplug cpu that is
> going down.
>
> The changes in drivers/base/cpu.c provide a clean and
> convenient interface for triggering the migration through
> sysfs, via writing the destination cpu number to an
> owner-writable file (0200 permissions) associated with the
> source cpu. In addition, this functionality is available
> for kernel module use.
>
> Note that migrating timers will not, by itself, keep new
> timers off of the chosen cpu. But with careful control of
> thread affinity, one can control the affinity of new timers
> and keep timer-induced latencies off of the chosen cpu.
>
> This particular patch does not affect hrtimers; those
> could be addressed later.
I can't say I like this, sorry.
- It adds another word to the timer_list structure for a very obscure
application. And a lot of kernel data structures aggregate timer_lists.
- There are places in the kernel which assume that once a timer is added
on a CPU, it will stay there. The timer handler re-arms the timer,
confident in the knowledge that everything stays on this CPU.
I recall working on such code a couple of years ago, but I now forget where
it was.
It is reasonable to do add_timer() within the CPU_ONLINE handler (for
example), in the expectation that that timer will fire on this CPU.
The proposed change permits the administrator to break that assumption.
We would need to hunt down any code which makes that assumption and
convert it to add_timer_on(). And we'd need to be very vigilant in the
future, since people could easily add new code that makes the old
assumption and works just fine for them in testing.
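For instance, a self-rearming handler of the common form below would
quietly start running on the wrong cpu after a migration; under this
patch it would have to be converted to re-arm itself with
add_timer_on() so the affinity becomes explicit (hypothetical code,
using the timer API of this era):

  #include <linux/timer.h>
  #include <linux/jiffies.h>
  #include <linux/smp.h>

  static struct timer_list poll_timer;

  static void poll_handler(unsigned long data)
  {
          /* ... per-cpu work that assumes it runs on the cpu
           * where the timer was first added ... */

          /* Old form: mod_timer(&poll_timer, jiffies + HZ);
           * re-queues on whatever base the timer is on now,
           * which the proposed sysfs knob can silently change. */

          /* Converted form: re-arm explicitly on this cpu.
           * add_timer_on() also sets is_bound_to_cpu, so the
           * sysfs migration will skip this timer from then on. */
          poll_timer.expires = jiffies + HZ;
          add_timer_on(&poll_timer, smp_processor_id());
  }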