Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1754680AbcKUWNA (ORCPT ); Mon, 21 Nov 2016 17:13:00 -0500 Received: from mail-pg0-f44.google.com ([74.125.83.44]:35115 "EHLO mail-pg0-f44.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753763AbcKUWM7 (ORCPT ); Mon, 21 Nov 2016 17:12:59 -0500 From: Joel Fernandes To: linux-kernel@vger.kernel.org Cc: Joel Fernandes , Steven Rostedt , Thomas Gleixner , John Stultz , Ingo Molnar Subject: [PATCH v2 1/2] timekeeping: Introduce a fast boot clock derived from fast monotonic clock Date: Mon, 21 Nov 2016 14:12:49 -0800 Message-Id: <1479766370-31311-1-git-send-email-joelaf@google.com> X-Mailer: git-send-email 2.8.0.rc3.226.g39d4020 Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 3334 Lines: 105 Introduce a fast boot clock. It accounts for suspend time and is based on the fast monotonic clock so that it's safe to use for tracing. The next patch adds the clock to the trace clock. 
Cc: Steven Rostedt Cc: Thomas Gleixner Cc: John Stultz Cc: Ingo Molnar Signed-off-by: Joel Fernandes --- kernel/time/timekeeping.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 37dec7e..5a3f5ff 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -56,6 +56,12 @@ static struct timekeeper shadow_timekeeper; struct tk_fast { seqcount_t seq; struct tk_read_base base[2]; + + /* + * first dimension is based on lower seq bit, + * second dimension is for offset type (real, boot, tai) + */ + ktime_t offsets[2][TK_OFFS_MAX]; }; static struct tk_fast tk_fast_mono ____cacheline_aligned; @@ -350,14 +356,20 @@ static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf /* Force readers off to base[1] */ raw_write_seqcount_latch(&tkf->seq); - /* Update base[0] */ + /* Update base[0] and offsets*/ memcpy(base, tkr, sizeof(*base)); + tkf->offsets[0][TK_OFFS_REAL] = tk_core.timekeeper.offs_real; + tkf->offsets[0][TK_OFFS_BOOT] = tk_core.timekeeper.offs_boot; + tkf->offsets[0][TK_OFFS_TAI] = tk_core.timekeeper.offs_tai; /* Force readers back to base[0] */ raw_write_seqcount_latch(&tkf->seq); - /* Update base[1] */ + /* Update base[1] and offsets*/ memcpy(base + 1, base, sizeof(*base)); + tkf->offsets[1][TK_OFFS_REAL] = tk_core.timekeeper.offs_real; + tkf->offsets[1][TK_OFFS_BOOT] = tk_core.timekeeper.offs_boot; + tkf->offsets[1][TK_OFFS_TAI] = tk_core.timekeeper.offs_tai; } /** @@ -392,16 +404,23 @@ static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf * of the following timestamps. Callers need to be aware of that and * deal with it. 
*/ -static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) +static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf, int offset) { struct tk_read_base *tkr; unsigned int seq; u64 now; + ktime_t *off; do { seq = raw_read_seqcount_latch(&tkf->seq); tkr = tkf->base + (seq & 0x01); - now = ktime_to_ns(tkr->base); + + if (unlikely((offset >= 0))) { + off = tkf->offsets[seq & 0x01]; + now = ktime_to_ns(ktime_add(tkr->base, off[offset])); + } else { + now = ktime_to_ns(tkr->base); + } now += timekeeping_delta_to_ns(tkr, clocksource_delta( @@ -415,16 +434,21 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) u64 ktime_get_mono_fast_ns(void) { - return __ktime_get_fast_ns(&tk_fast_mono); + return __ktime_get_fast_ns(&tk_fast_mono, -1); } EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns); u64 ktime_get_raw_fast_ns(void) { - return __ktime_get_fast_ns(&tk_fast_raw); + return __ktime_get_fast_ns(&tk_fast_raw, -1); } EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); +u64 ktime_get_boot_fast_ns(void) +{ + return __ktime_get_fast_ns(&tk_fast_mono, TK_OFFS_BOOT); +} + /* Suspend-time cycles value for halted fast timekeeper. */ static cycle_t cycles_at_suspend; -- 2.8.0.rc3.226.g39d4020