need sched_clock for latency statistics
Signed-off-by: Martin Peschke <[email protected]>
---
alpha/kernel/time.c | 1 +
arm/kernel/time.c | 1 +
arm/mach-omap1/time.c | 1 +
arm/mach-realview/core.c | 1 +
arm/mach-sa1100/generic.c | 1 +
arm/mach-versatile/core.c | 1 +
arm/plat-omap/timer32k.c | 1 +
arm26/kernel/time.c | 1 +
cris/kernel/time.c | 1 +
frv/kernel/time.c | 1 +
h8300/kernel/time.c | 3 ++-
i386/kernel/timers/timer_tsc.c | 1 +
m32r/kernel/time.c | 1 +
m68k/kernel/time.c | 2 +-
m68knommu/kernel/time.c | 4 +++-
mips/kernel/time.c | 1 +
parisc/kernel/time.c | 1 +
powerpc/kernel/time.c | 1 +
ppc/kernel/time.c | 1 +
s390/kernel/time.c | 1 +
sh/kernel/time.c | 1 +
sh64/kernel/time.c | 2 +-
sparc/kernel/time.c | 1 +
sparc64/kernel/time.c | 1 +
um/kernel/time_kern.c | 1 +
v850/kernel/time.c | 1 +
x86_64/kernel/time.c | 1 +
xtensa/kernel/time.c | 1 +
28 files changed, 31 insertions(+), 4 deletions(-)
diff -Nurp a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
--- a/arch/alpha/kernel/time.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/alpha/kernel/time.c 2006-05-15 17:36:14.000000000 +0200
@@ -101,6 +101,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
diff -Nurp a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
--- a/arch/arm/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/arm/kernel/time.c 2006-05-15 17:36:51.000000000 +0200
@@ -84,6 +84,7 @@ unsigned long long __attribute__((weak))
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static unsigned long next_rtc_update;
diff -Nurp a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
--- a/arch/arm/mach-omap1/time.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/arm/mach-omap1/time.c 2006-05-15 17:37:24.000000000 +0200
@@ -220,6 +220,7 @@ unsigned long long sched_clock(void)
return cycles_2_ns(ticks64);
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
* ---------------------------------------------------------------------------
diff -Nurp a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
--- a/arch/arm/mach-realview/core.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/arm/mach-realview/core.c 2006-05-15 17:38:47.000000000 +0200
@@ -62,6 +62,7 @@ unsigned long long sched_clock(void)
return v;
}
+EXPORT_SYMBOL_GPL(sched_clock);
#define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET)
diff -Nurp a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
--- a/arch/arm/mach-sa1100/generic.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/arm/mach-sa1100/generic.c 2006-05-15 17:39:43.000000000 +0200
@@ -131,6 +131,7 @@ unsigned long long sched_clock(void)
return v;
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
* Default power-off for SA1100
diff -Nurp a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
--- a/arch/arm/mach-versatile/core.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/arm/mach-versatile/core.c 2006-05-15 17:40:17.000000000 +0200
@@ -239,6 +239,7 @@ unsigned long long sched_clock(void)
return v;
}
+EXPORT_SYMBOL_GPL(sched_clock);
#define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET)
diff -Nurp a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
--- a/arch/arm/plat-omap/timer32k.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/arm/plat-omap/timer32k.c 2006-05-15 17:41:02.000000000 +0200
@@ -188,6 +188,7 @@ unsigned long long sched_clock(void)
{
return omap_32k_ticks_to_nsecs(omap_32k_sync_timer_read());
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
* Timer interrupt for 32KHz timer. When dynamic tick is enabled, this
diff -Nurp a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
--- a/arch/arm26/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/arm26/kernel/time.c 2006-05-15 17:41:39.000000000 +0200
@@ -99,6 +99,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static unsigned long next_rtc_update;
diff -Nurp a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
--- a/arch/cris/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/cris/kernel/time.c 2006-05-15 17:42:25.000000000 +0200
@@ -231,6 +231,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static int
__init init_udelay(void)
diff -Nurp a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
--- a/arch/frv/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/frv/kernel/time.c 2006-05-15 17:42:56.000000000 +0200
@@ -230,3 +230,4 @@ unsigned long long sched_clock(void)
{
return jiffies_64 * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
--- a/arch/h8300/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/h8300/kernel/time.c 2006-05-15 17:43:40.000000000 +0200
@@ -123,5 +123,6 @@ EXPORT_SYMBOL(do_settimeofday);
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
-
}
+
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
--- a/arch/i386/kernel/timers/timer_tsc.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/i386/kernel/timers/timer_tsc.c 2006-05-15 17:44:11.000000000 +0200
@@ -167,6 +167,7 @@ unsigned long long sched_clock(void)
/* return the value in ns */
return cycles_2_ns(this_offset);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static void delay_tsc(unsigned long loops)
{
diff -Nurp a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
--- a/arch/m32r/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/m32r/kernel/time.c 2006-05-15 17:44:58.000000000 +0200
@@ -304,3 +304,4 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
--- a/arch/m68k/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/m68k/kernel/time.c 2006-05-15 17:45:29.000000000 +0200
@@ -177,4 +177,4 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies*(1000000000/HZ);
}
-
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
--- a/arch/m68knommu/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/m68knommu/kernel/time.c 2006-05-15 17:46:26.000000000 +0200
@@ -180,6 +180,8 @@ int do_settimeofday(struct timespec *tv)
return 0;
}
+EXPORT_SYMBOL(do_settimeofday);
+
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -188,4 +190,4 @@ unsigned long long sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ);
}
-EXPORT_SYMBOL(do_settimeofday);
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
--- a/arch/mips/kernel/time.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/mips/kernel/time.c 2006-05-15 17:47:00.000000000 +0200
@@ -778,3 +778,4 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies*(1000000000/HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
--- a/arch/parisc/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/parisc/kernel/time.c 2006-05-15 17:47:34.000000000 +0200
@@ -211,6 +211,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
void __init time_init(void)
diff -Nurp a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
--- a/arch/powerpc/kernel/time.c 2006-05-15 12:42:07.000000000 +0200
+++ b/arch/powerpc/kernel/time.c 2006-05-15 17:48:03.000000000 +0200
@@ -781,6 +781,7 @@ unsigned long long sched_clock(void)
return get_rtc();
return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
+EXPORT_SYMBOL_GPL(sched_clock);
int do_settimeofday(struct timespec *tv)
{
diff -Nurp a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
--- a/arch/ppc/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/ppc/kernel/time.c 2006-05-15 17:48:38.000000000 +0200
@@ -445,3 +445,4 @@ unsigned long long sched_clock(void)
}
return tb;
}
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
--- a/arch/s390/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/s390/kernel/time.c 2006-05-15 17:49:11.000000000 +0200
@@ -63,6 +63,7 @@ unsigned long long sched_clock(void)
{
return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
* Monotonic_clock - returns # of nanoseconds passed since time_init()
diff -Nurp a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
--- a/arch/sh/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/sh/kernel/time.c 2006-05-15 17:49:42.000000000 +0200
@@ -44,6 +44,7 @@ unsigned long long __attribute__ ((weak)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
void do_gettimeofday(struct timeval *tv)
{
diff -Nurp a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
--- a/arch/sh64/kernel/time.c 2006-05-15 12:42:08.000000000 +0200
+++ b/arch/sh64/kernel/time.c 2006-05-15 17:50:09.000000000 +0200
@@ -598,4 +598,4 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
-
+EXPORT_SYMBOL_GPL(sched_clock);
diff -Nurp a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
--- a/arch/sparc/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/sparc/kernel/time.c 2006-05-15 17:50:42.000000000 +0200
@@ -466,6 +466,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
/* Ok, my cute asm atomicity trick doesn't work anymore.
* There are just too many variables that need to be protected
diff -Nurp a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
--- a/arch/sparc64/kernel/time.c 2006-05-15 12:42:08.000000000 +0200
+++ b/arch/sparc64/kernel/time.c 2006-05-15 17:51:19.000000000 +0200
@@ -1135,6 +1135,7 @@ unsigned long long sched_clock(void)
return (ticks * timer_ticks_per_nsec_quotient)
>> SPARC64_NSEC_PER_CYC_SHIFT;
}
+EXPORT_SYMBOL_GPL(sched_clock);
static int set_rtc_mmss(unsigned long nowtime)
{
diff -Nurp a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
--- a/arch/um/kernel/time_kern.c 2006-05-15 12:42:08.000000000 +0200
+++ b/arch/um/kernel/time_kern.c 2006-05-15 17:51:46.000000000 +0200
@@ -34,6 +34,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies_64 * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
/* Changed at early boot */
int timer_irq_inited = 0;
diff -Nurp a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
--- a/arch/v850/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/v850/kernel/time.c 2006-05-15 17:52:11.000000000 +0200
@@ -35,6 +35,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
/*
* timer_interrupt() needs to keep up the real-time clock,
diff -Nurp a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
--- a/arch/x86_64/kernel/time.c 2006-05-15 12:42:08.000000000 +0200
+++ b/arch/x86_64/kernel/time.c 2006-05-15 17:52:39.000000000 +0200
@@ -501,6 +501,7 @@ unsigned long long sched_clock(void)
rdtscll(a);
return cycles_2_ns(a);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static unsigned long get_cmos_time(void)
{
diff -Nurp a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
--- a/arch/xtensa/kernel/time.c 2006-03-20 06:53:29.000000000 +0100
+++ b/arch/xtensa/kernel/time.c 2006-05-15 17:53:03.000000000 +0200
@@ -49,6 +49,7 @@ unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static struct irqaction timer_irqaction = {
Martin Peschke <[email protected]> wrote:
>
> need sched_clock for latency statistics
sched_clock() probably isn't suitable for this application. It's a
scheduler thing and has a number of accuracy problems.
But I thought we discussed this last time around? Maybe not.
Maybe you've considered sched_clock()'s drawbacks and you've decided
they're all acceptable. If so, the changelog should have described the
reasoning.
But even if so, sched_clock() isn't something we want to be exporting to
modules.
Andrew Morton wrote:
> Martin Peschke <[email protected]> wrote:
>> need sched_clock for latency statistics
>
> sched_clock() probably isn't suitable for this application. It's a
> scheduler thing and has a number of accuracy problems.
>
> But I thought we discussed this last time around? Maybe not.
I have been too obsessed with the other issues to remember.
> Maybe you've considered sched_clock()'s drawbacks and you've decided
> they're all acceptable.
Admittedly, I didn't, but merely believed the comment above the function.
http://marc.theaimsgroup.com/?l=linux-kernel&m=114657675408686&w=2
sheds some light.
I would be happy to exploit an API that may result from that discussion.
I would plead for exporting such an API to modules. I don't see how
to implement statistics for latencies, otherwise.
Any other hints on how to replace my sched_clock() calls are welcome.
(I want to measure elapsed times in units that are understandable to
users without hardware manuals and calculator, such as milliseconds.)
Martin
On Wednesday 17 May 2006 00:36, Martin Peschke wrote:
> Any other hints on how to replace my sched_clock() calls are welcome.
> (I want to measure elapsed times in units that are understandable to
> users without hardware manuals and calculator, such as milliseconds.)
There are a number of APIs that allow you to get the time (a small usage
sketch follows the list):
- do_gettimeofday
  potentially slow, reliable TOD clock, microsecond resolution
- ktime_get_ts
  monotonic clock, nanosecond resolution
- getnstimeofday
  reliable, nanosecond TOD clock
- xtime
  jiffy-accurate TOD clock, with fast reads
- xtime + wall_to_monotonic
  jiffy-accurate monotonic clock, almost as fast
- get_cycles
  highest supported resolution and accuracy, highly
  HW-specific behaviour, may overflow.
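For the latency-statistics case, a minimal sketch of taking a sample with one
of the interfaces above (ktime_get_ts() here; the helper name
latency_sample_ns is made up for illustration) could look like this:

#include <linux/hrtimer.h>	/* ktime_get_ts() is declared here on kernels of this era */
#include <linux/time.h>		/* struct timespec, NSEC_PER_SEC */

/* Illustrative only: a monotonic timestamp in nanoseconds. */
static inline unsigned long long latency_sample_ns(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return (unsigned long long)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/*
 * Usage: take one sample before and one after the code of interest,
 * subtract, and divide by 1000000 to report milliseconds.
 */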
Arnd <><
>>>>> "Martin" == Martin Peschke <[email protected]> writes:
Martin> I would be happy to exploit an API that may result from that
Martin> discussion. I would plead for exporting such an API to
Martin> modules. I don't see how to implement statistics for
Martin> latencies, otherwise.
Martin> Any other hints on how to replace my sched_clock() calls are
Martin> welcome. (I want to measure elapsed times in units that are
Martin> understandable to users without hardware manuals and
Martin> calculator, such as milliseconds.)
You may wish to look at the microstate accounting patches at
http://www.gelato.unsw.edu.au/cgi-bin/viewcvs.cgi/cvs/kernel/microstate/
I made the clock source configurable.
It's actually rather difficult to find a reliable cross-platform
monotonic clock with good resolution. And different statistics may
want different clocks; for my purposes a cycle counter is best
(because I'm running on a machine that varies the clock speed for
power management reasons); other people may want nanoseconds.
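For illustration, a configurable clock source can be as simple as a function
pointer; the names below (stats_clock_fn, stats_clock_cycles,
stats_clock_gtod) are made up and not taken from the microstate accounting
patches:

#include <linux/time.h>		/* do_gettimeofday() */
#include <asm/timex.h>		/* cycles_t, get_cycles() */

typedef unsigned long long (*stats_clock_fn)(void);

/* Cycle counter: highest resolution, HW-specific, unit is cycles. */
static unsigned long long stats_clock_cycles(void)
{
	return (unsigned long long)get_cycles();
}

/* Time of day: slower but portable, unit is microseconds. */
static unsigned long long stats_clock_gtod(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	return (unsigned long long)tv.tv_sec * 1000000ULL + tv.tv_usec;
}

/* Selected once, e.g. via a config option; reporting must know the unit. */
static stats_clock_fn stats_clock = stats_clock_cycles;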
--
Dr Peter Chubb http://www.gelato.unsw.edu.au peterc AT gelato.unsw.edu.au
http://www.ertos.nicta.com.au ERTOS within National ICT Australia
Arnd> On Wednesday 17 May 2006 00:36, Martin Peschke wrote:
> Any other hints on how to replace my sched_clock() calls are welcome.
> (I want to measure elapsed times in units that are understandable to
> users without hardware manuals and calculator, such as milliseconds.)
Arnd> There are a number of APIs that allow you to get the time:
Arnd> - do_gettimeofday
Arnd>   potentially slow, reliable TOD clock, microsecond resolution
Slow, not necessarily safe to call in IRQ context.
Arnd> - ktime_get_ts
Arnd>   monotonic clock, nanosecond resolution
Actual resolution varies by platform; it may be as low as one jiffy.
Arnd> - getnstimeofday
Arnd>   reliable, nanosecond TOD clock
(which is only currently implemented with ns resolution on IA64 and
Sparc64, AFAIK)
Arnd> - xtime
Arnd>   jiffy-accurate TOD clock, with fast reads
Too coarse a resolution.
Arnd> - get_cycles
Arnd>   highest supported resolution and accuracy, highly
Arnd>   HW-specific behaviour, may overflow.
Not very usable on SMP if you want to measure across migration; may run
at a variable rate.
--
Dr Peter Chubb http://www.gelato.unsw.edu.au peterc AT gelato.unsw.edu.au
http://www.ertos.nicta.com.au ERTOS within National ICT Australia
> Arnd> - do_gettimeofday
> Arnd>   potentially slow, reliable TOD clock, microsecond resolution
>
> Slow, not necessarily safe to call in IRQ context.
It's only slow if the platform can't do better. On good hardware it is
fast. And yes it is safe to call in IRQ context. Networking does
that all the time.
If the hardware doesn't have a good working timer for gettimeofday,
then everything else will also be slow.
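A trivial sketch along those lines, assuming do_gettimeofday() as the clock
(elapsed_us is a made-up helper name):

#include <linux/time.h>

/* Illustrative only: microseconds between two do_gettimeofday() samples. */
static inline long elapsed_us(const struct timeval *start,
			      const struct timeval *end)
{
	return (end->tv_sec - start->tv_sec) * 1000000L +
	       (end->tv_usec - start->tv_usec);
}

/*
 * struct timeval t0, t1;
 * do_gettimeofday(&t0);
 * ... code under measurement, interrupt context included ...
 * do_gettimeofday(&t1);
 * printk("took %ld us\n", elapsed_us(&t0, &t1));
 */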
-Andi
>>>>> "Andi" == Andi Kleen <[email protected]> writes:
Arnd> - do_gettimeofday
Arnd>   potentially slow, reliable TOD clock, microsecond resolution
>> Slow, not necessarily safe to call in IRQ context.
Andi> It's only slow if the platform can't do better. On good hardware
Andi> it is fast. And yes it is safe to call in IRQ
Andi> context. Networking does that all the time.
Thanks for the clarification.
I measured do_gettimeofday on IA64 at around 120 cycles (mind you that
was some time ago now, before the last lot of time function revisions
in the kernel). Whether that's slow or not depends on your application.
--
Dr Peter Chubb http://www.gelato.unsw.edu.au peterc AT gelato.unsw.edu.au
http://www.ertos.nicta.com.au ERTOS within National ICT Australia
On Wed, 17 May 2006, Peter Chubb wrote:
> I measured do_gettimeofday on IA64 at around 120 cycles (mind you that
> was some time ago now, before the last lot of time function revisions
> in the kernel). Whether that's slow or not depends on your application.
What I did for time-critical statistics in core VM paths is to use
get_cycles(), but associate each cycle value with a processor id when the
measurement starts. If the processor id has changed when we end the
measurement, then we discard the measurement and just note that we missed one.
Here is a piece of a description for a patch that I have used in the past
for these statistics:
----
The patch puts three performance counters into critical kernel paths to
show how it is done.
The measurements will show up in /proc/perf/all. Processor-specific
statistics may be obtained via /proc/perf/<nr-of-processor>. Writing to
/proc/perf/reset will reset all counters, e.g.:
echo >/proc/perf/reset
Sample output:
AllocPages 786882 (+ 0) 9.9s(223ns/12.6us/48.6us) 12.9gb(16.4kb/16.4kb/32.8kb)
FaultTime 786855 (+192) 10.4s(198ns/13.2us/323.6us)
PrepZeroPage 786765 (+ 0) 9.2s(549ns/11.7us/48.1us) 12.9gb(16.4kb/16.4kb/16.4kb)
The first counter is the number of times that the time measurement was
performed. (+ xx) is the number of samples that were thrown away because the
processor on which the process was running changed. Cycle counters are not
consistent across different processors.
Then follows the sum of the time spent in the code segment, followed in
parentheses by the minimum / average / maximum time spent there. The
second block gives the sizes of the data processed. In this sample, 12.9 GB
was allocated via AllocPages. Most allocations were 16k = 1 page, although
there were also 32k (2-page) allocations.
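A minimal sketch of that sampling scheme (the names cycle_sample,
cycle_sample_begin and cycle_sample_end are illustrative, not taken from the
actual patch):

#include <linux/smp.h>		/* raw_smp_processor_id() */
#include <asm/timex.h>		/* cycles_t, get_cycles() */

struct cycle_sample {
	cycles_t start;
	int cpu;
};

static inline void cycle_sample_begin(struct cycle_sample *s)
{
	s->cpu = raw_smp_processor_id();
	s->start = get_cycles();
}

/*
 * Returns elapsed cycles, or -1 if the task migrated to another CPU,
 * in which case the caller discards the sample and bumps the "missed"
 * count shown as (+ xx) above.
 */
static inline long long cycle_sample_end(struct cycle_sample *s)
{
	cycles_t now = get_cycles();

	if (raw_smp_processor_id() != s->cpu)
		return -1;
	return (long long)(now - s->start);
}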
Andrew Morton wrote:
>Martin Peschke <[email protected]> wrote:
>
>>need sched_clock for latency statistics
>>
>
>sched_clock() probably isn't suitable for this application. It's a
>scheduler thing and has a number of accuracy problems.
>
>But I thought we discussed this last time around? Maybe not.
>
>Maybe you've considered sched_clock()'s drawbacks and you've decided
>they're all acceptable. If so, the changelog should have described the
>reasoning.
>
Yeah; please, no more users of sched_clock(). Definitely don't export.
Even if it is what you want today (which it probably isn't), the scheduler
might want the ability to change it at short notice, depending on new
algorithms / new hardware etc. in future.
Make a new function if none of the suggested alternatives work.