Currently the vDSO does not handle
     clock_gettime(CLOCK_MONOTONIC_RAW, &ts)
on Intel / AMD - it calls vdso_fallback_gettime() for this clock, which
issues a syscall and so has an unacceptably high latency (minimum
measurable time, or time between measurements) of 300-700ns on two
2.8-3.9GHz Haswell x86_64 (Family_Model: 06_3C) machines under various
versions of Linux.
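
For context, the fallback latency can be reproduced from user space with a
simple back-to-back measurement loop. The following stand-alone program is
an illustrative sketch only (it is not part of this patch or of the kernel
selftests); it reports the minimum interval observable between two
consecutive CLOCK_MONOTONIC_RAW readings:

    /* latency-sketch.c: measure the minimum interval between two
     * consecutive clock_gettime(CLOCK_MONOTONIC_RAW, ...) calls.
     * Illustrative only - not part of the patch.
     */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec t1, t2;
            long best = -1;

            for (int i = 0; i < 1000000; i++) {
                    clock_gettime(CLOCK_MONOTONIC_RAW, &t1);
                    clock_gettime(CLOCK_MONOTONIC_RAW, &t2);
                    long d = (t2.tv_sec - t1.tv_sec) * 1000000000L
                           + (t2.tv_nsec - t1.tv_nsec);
                    if (best < 0 || d < best)
                            best = d;
            }
            printf("minimum measurable interval: %ld ns\n", best);
            return 0;
    }

With the syscall fallback this prints several hundred nanoseconds on the
machines described above; with a vDSO implementation it drops to tens of
nanoseconds.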
This patch handles CLOCK_MONOTONIC_RAW clock_gettime() in the vDSO,
by exporting the raw clock calibration, last cycles, last xtime_nsec,
and last raw_sec value in the vsyscall_gtod_data during vsyscall_update().
With this patch, the new do_monotonic_raw() function in the vDSO has an
average latency of about 24ns, and the test program
     tools/testing/selftests/timers/inconsistency-check.c
succeeds with arguments '-c 4 -t 120' (or any arbitrary -t value).
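
For reference, the conversion such a vDSO function performs follows the same
mult/shift pattern as the existing do_monotonic(). The sketch below is
illustrative only, not the exact code of this patch: raw_cycle_last and
has_rdtscp appear in the diff below, while raw_mult, raw_shift,
raw_xtime_nsec and raw_sec are assumed names for the exported calibration
and snapshot values; gtod_read_begin(), gtod_read_retry() and
__iter_div_u64_rem() are the helpers the existing vclock_gettime.c
functions already use.

    /* Illustrative sketch of a vDSO CLOCK_MONOTONIC_RAW reader.
     * Field names other than raw_cycle_last and has_rdtscp are assumptions.
     */
    notrace static int do_monotonic_raw(struct timespec *ts)
    {
            unsigned int seq;
            u64 delta, ns;

            do {
                    seq = gtod_read_begin(gtod);
                    /* cycles elapsed since the last vsyscall_update() */
                    delta = vread_tsc_raw() - gtod->raw_cycle_last;
                    /*
                     * Same pattern as do_monotonic(): the saved nanoseconds
                     * are assumed to be kept left-shifted by the calibration
                     * shift, so accumulate delta * mult and shift once.
                     */
                    ns   = gtod->raw_xtime_nsec;
                    ns  += delta * gtod->raw_mult;
                    ns >>= gtod->raw_shift;
                    ts->tv_sec = gtod->raw_sec;
            } while (unlikely(gtod_read_retry(gtod, seq)));

            ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
            ts->tv_nsec = ns;

            return 0;
    }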
The patch is against Linus' latest 4.16-rc4 tree, the current HEAD of:
     git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
The patch affects only the following files:
     arch/x86/include/asm/vgtod.h   (see the field sketch after this list)
     arch/x86/entry/vdso/vclock_gettime.c
     arch/x86/entry/vsyscall/vsyscall_gtod.c
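
For orientation, the export amounts to a few extra fields in
struct vsyscall_gtod_data (arch/x86/include/asm/vgtod.h). The sketch below
is illustrative only: raw_cycle_last and has_rdtscp appear in the diff,
while the remaining names are assumptions matching the description above.

    struct vsyscall_gtod_data {
            /* ... existing fields ... */
            u64     raw_cycle_last;   /* TSC value at the last vsyscall_update()      */
            u32     raw_mult;         /* raw clock calibration: cycles->ns multiplier */
            u32     raw_shift;        /* raw clock calibration: shift                 */
            u64     raw_xtime_nsec;   /* last xtime_nsec of the raw clock             */
            u64     raw_sec;          /* last raw seconds value                       */
            int     has_rdtscp;       /* non-zero if the CPU supports RDTSCP          */
    };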
Best Regards,
Jason Vas Dias .
---
diff -up linux-4.16-rc4/arch/x86/entry/vdso/vclock_gettime.c.4.16-rc4 linux-4.16-rc4/arch/x86/entry/vdso/vclock_gettime.c
--- linux-4.16-rc4/arch/x86/entry/vdso/vclock_gettime.c.4.16-rc4 2018-03-04 22:54:11.000000000 +0000
+++ linux-4.16-rc4/arch/x86/entry/vdso/vclock_gettime.c 2018-03-11 05:08:31.137681337 +0000
@@ -182,6 +182,29 @@ notrace static u64 vread_tsc(void)
 	return last;
 }
+notrace static u64 vread_tsc_raw(void)
+{
+	u64 tsc, last = gtod->raw_cycle_last;
+
+	if (likely(gtod->has_rdtscp)) {
+		u32 tsc_lo, tsc_hi,
+		    tsc_cpu __attribute__((unused));
+		asm volatile
+		("rdtscp"
+			/* ^- has built-in cancellation point / pipeline stall "barrier" */
+		:   "=a" (tsc_lo)
+		  , "=d" (tsc_hi)
+		  , "=c" (tsc_cpu)
+		); // since all variables are 32-bit, eax, edx, ecx are used - NOT rax, rdx, rcx
+		tsc = ((((u64)tsc_hi) & 0xffffffffUL) << 32) | (((u64)tsc_lo) & 0xffffffffUL);
+	} else {
+		tsc = rdtsc_ordered();
+	}
+	if (likely(tsc >= last))