2007-08-28 21:38:46

by Daniel Walker

Subject: [PATCH -rt 5/8] latency tracing: use now() consistently

Switch the remaining direct get_monotonic_cycles() call sites over to now(), so the latency tracer takes all of its timestamps through the same helper.
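
For context, the point is that every timestamp the tracer records funnels through one wrapper, so the underlying clock can later be changed in a single place. A minimal sketch of what such a wrapper looks like; the exact return type and definition site are assumptions here, since now() is defined elsewhere in the -rt tree rather than in this patch:

/*
 * Sketch only: the real now() lives elsewhere in the -rt patch set.
 * Routing all tracer timestamps through this one helper means a
 * different clock source only has to be wired up in this function.
 */
static inline cycle_t now(void)
{
	return get_monotonic_cycles();
}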

Signed-off-by: Daniel Walker <[email protected]>

---
kernel/latency_trace.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

Index: linux-2.6.22/kernel/latency_trace.c
===================================================================
--- linux-2.6.22.orig/kernel/latency_trace.c
+++ linux-2.6.22/kernel/latency_trace.c
@@ -1751,7 +1751,7 @@ check_critical_timing(int cpu, struct cp
* as long as possible:
*/
T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
delta = T1-T0;

local_save_flags(flags);
@@ -1765,7 +1765,7 @@ check_critical_timing(int cpu, struct cp
* might change it (it can only get larger so the latency
* is fair to be reported):
*/
- T2 = get_monotonic_cycles();
+ T2 = now();

delta = T2-T0;

@@ -1815,7 +1815,7 @@ check_critical_timing(int cpu, struct cp
printk(" => ended at timestamp %lu: ", t1);
print_symbol("<%s>\n", tr->critical_end);
dump_stack();
- t1 = cycles_to_usecs(get_monotonic_cycles());
+ t1 = cycles_to_usecs(now());
printk(" => dump-end timestamp %lu\n\n", t1);
#endif

@@ -1825,7 +1825,7 @@ check_critical_timing(int cpu, struct cp

out:
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->early_warning = 0;
reset_trace_idx(cpu, tr);
_trace_cmdline(cpu, tr);
@@ -1874,7 +1874,7 @@ __start_critical_timing(unsigned long ei
atomic_inc(&tr->disabled);

tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->critical_start = eip;
reset_trace_idx(cpu, tr);
tr->latency_type = latency_type;
@@ -2196,7 +2196,7 @@ check_wakeup_timing(struct cpu_trace *tr
goto out;

T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
/*
* Any wraparound or time warp and we are out:
*/
@@ -2314,7 +2314,7 @@ void __trace_start_sched_wakeup(struct t
// if (!atomic_read(&tr->disabled)) {
atomic_inc(&tr->disabled);
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->latency_type = WAKEUP_LATENCY;
tr->critical_start = CALLER_ADDR0;
_trace_cmdline(raw_smp_processor_id(), tr);
@@ -2426,7 +2426,7 @@ long user_trace_start(void)

atomic_inc(&tr->disabled);
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->critical_start = CALLER_ADDR0;
_trace_cmdline(cpu, tr);
atomic_dec(&tr->disabled);
@@ -2486,7 +2486,7 @@ long user_trace_stop(void)
unsigned long long tmp0;

T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
tmp0 = preempt_max_latency;
if (T1 < T0)
T0 = T1;

--