Date: Wed, 5 Jul 2017 03:28:57 -0700
From: tip-bot for Frederic Weisbecker
To: linux-tip-commits@vger.kernel.org
Cc: mingo@kernel.org, linux-kernel@vger.kernel.org, hpa@zytor.com, kernellwp@gmail.com,
    lcapitulino@redhat.com, tglx@linutronix.de, fweisbec@gmail.com, riel@redhat.com,
    peterz@infradead.org, torvalds@linux-foundation.org
Reply-To: torvalds@linux-foundation.org, peterz@infradead.org, riel@redhat.com,
    tglx@linutronix.de, fweisbec@gmail.com, linux-kernel@vger.kernel.org,
    mingo@kernel.org, lcapitulino@redhat.com, kernellwp@gmail.com, hpa@zytor.com
In-Reply-To: <1498756511-11714-4-git-send-email-fweisbec@gmail.com>
References: <1498756511-11714-4-git-send-email-fweisbec@gmail.com>
Subject: [tip:sched/urgent] sched/cputime: Rename vtime fields
Git-Commit-ID: 60a9ce57e7c5ac1df3a39fb941022bbfa40c0862
X-Mailer: tip-git-log-daemon

Commit-ID:  60a9ce57e7c5ac1df3a39fb941022bbfa40c0862
Gitweb:     http://git.kernel.org/tip/60a9ce57e7c5ac1df3a39fb941022bbfa40c0862
Author:     Frederic Weisbecker
AuthorDate: Thu, 29 Jun 2017 19:15:09 +0200
Committer:  Ingo Molnar
CommitDate: Wed, 5 Jul 2017 09:54:14 +0200

sched/cputime: Rename vtime fields

The current "snapshot" based naming of the vtime fields suggests that we
record some past event, but that is a low-level picture of their actual
purpose which comes out blurry. The real point of these fields is to run
a basic state machine that tracks where cputime should be accounted while
the task switches between contexts.

So let's reflect that with more meaningful names.
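To make that state machine concrete, here is a minimal standalone C sketch of
what the renamed fields track. It is a userspace model, not kernel code: the
fake_jiffies counter, the vtime_ctx struct and vtime_switch() are illustrative
stand-ins for jiffies, the task_struct fields and the accounting hooks touched
by this patch.

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's definitions. */
static unsigned long fake_jiffies;

enum vtime_state_t { VTIME_INACTIVE = 0, VTIME_USER, VTIME_SYS };

struct vtime_ctx {
        enum vtime_state_t vtime_state;      /* which context is being accounted */
        unsigned long      vtime_starttime;  /* when the current state began */
        unsigned long      utime, stime;     /* accumulated user/system jiffies */
};

/* Flush the time spent in the current state, then switch to the next one. */
static void vtime_switch(struct vtime_ctx *v, enum vtime_state_t next)
{
        unsigned long delta = fake_jiffies - v->vtime_starttime;

        if (v->vtime_state == VTIME_USER)
                v->utime += delta;
        else if (v->vtime_state == VTIME_SYS)
                v->stime += delta;
        /* VTIME_INACTIVE: nothing was accumulating, nothing to flush. */

        v->vtime_state = next;
        v->vtime_starttime = fake_jiffies;
}

int main(void)
{
        struct vtime_ctx v = { .vtime_state = VTIME_INACTIVE };

        vtime_switch(&v, VTIME_SYS);    /* enter kernel context */
        fake_jiffies += 3;              /* 3 ticks of system time elapse */
        vtime_switch(&v, VTIME_USER);   /* return to userspace */
        fake_jiffies += 5;              /* 5 ticks of user time elapse */
        vtime_switch(&v, VTIME_INACTIVE);

        printf("stime=%lu utime=%lu\n", v.stime, v.utime); /* stime=3 utime=5 */
        return 0;
}

In the patch below, arch_vtime_task_switch() and vtime_init_idle() play the
role of vtime_switch() above: they move the task to a new state and reset
vtime_starttime to the current jiffies, while get_vtime_delta() flushes
whatever has accumulated since then.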
Tested-by: Luiz Capitulino
Signed-off-by: Frederic Weisbecker
Reviewed-by: Thomas Gleixner
Acked-by: Rik van Riel
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Wanpeng Li
Link: http://lkml.kernel.org/r/1498756511-11714-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 include/linux/init_task.h |  4 ++--
 include/linux/sched.h     |  4 ++--
 kernel/fork.c             |  4 ++--
 kernel/sched/cputime.c    | 30 +++++++++++++++---------------
 4 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e049526..3d53733 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -171,8 +171,8 @@ extern struct cred init_cred;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)                                        \
         .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),      \
-        .vtime_snap = 0,                                        \
-        .vtime_snap_whence = VTIME_SYS,
+        .vtime_starttime = 0,                                   \
+        .vtime_state = VTIME_SYS,
 #else
 # define INIT_VTIME(tsk)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c4ca74..ff00164 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -689,7 +689,7 @@ struct task_struct {
         struct prev_cputime             prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
         seqcount_t                      vtime_seqcount;
-        unsigned long long              vtime_snap;
+        unsigned long long              vtime_starttime;
         enum {
                 /* Task is sleeping or running in a CPU with VTIME inactive: */
                 VTIME_INACTIVE = 0,
@@ -697,7 +697,7 @@ struct task_struct {
                 VTIME_USER,
                 /* Task runs in kernelspace in a CPU with VTIME active: */
                 VTIME_SYS,
-        } vtime_snap_whence;
+        } vtime_state;
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/fork.c b/kernel/fork.c
index e53770d..83c4f9b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1638,8 +1638,8 @@ static __latent_entropy struct task_struct *copy_process(
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
         seqcount_init(&p->vtime_seqcount);
-        p->vtime_snap = 0;
-        p->vtime_snap_whence = VTIME_INACTIVE;
+        p->vtime_starttime = 0;
+        p->vtime_state = VTIME_INACTIVE;
 #endif
 
 #if defined(SPLIT_RSS_COUNTING)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ab68927..8c64753 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -683,10 +683,10 @@ static u64 vtime_delta(struct task_struct *tsk)
 {
         unsigned long now = READ_ONCE(jiffies);
 
-        if (time_before(now, (unsigned long)tsk->vtime_snap))
+        if (time_before(now, (unsigned long)tsk->vtime_starttime))
                 return 0;
 
-        return jiffies_to_nsecs(now - tsk->vtime_snap);
+        return jiffies_to_nsecs(now - tsk->vtime_starttime);
 }
 
 static u64 get_vtime_delta(struct task_struct *tsk)
@@ -701,10 +701,10 @@ static u64 get_vtime_delta(struct task_struct *tsk)
          * elapsed time. Limit account_other_time to prevent rounding
          * errors from causing elapsed vtime to go negative.
          */
-        delta = jiffies_to_nsecs(now - tsk->vtime_snap);
+        delta = jiffies_to_nsecs(now - tsk->vtime_starttime);
         other = account_other_time(delta);
-        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
-        tsk->vtime_snap = now;
+        WARN_ON_ONCE(tsk->vtime_state == VTIME_INACTIVE);
+        tsk->vtime_starttime = now;
 
         return delta - other;
 }
@@ -746,7 +746,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 {
         /*
          * The flags must be updated under the lock with
-         * the vtime_snap flush and update.
+         * the vtime_starttime flush and update.
          * That enforces a right ordering and update sequence
          * synchronization against the reader (task_gtime())
          * that can thus safely catch up with a tickless delta.
@@ -776,12 +776,12 @@ void vtime_account_idle(struct task_struct *tsk)
 void arch_vtime_task_switch(struct task_struct *prev)
 {
         write_seqcount_begin(&prev->vtime_seqcount);
-        prev->vtime_snap_whence = VTIME_INACTIVE;
+        prev->vtime_state = VTIME_INACTIVE;
         write_seqcount_end(&prev->vtime_seqcount);
 
         write_seqcount_begin(&current->vtime_seqcount);
-        current->vtime_snap_whence = VTIME_SYS;
-        current->vtime_snap = jiffies;
+        current->vtime_state = VTIME_SYS;
+        current->vtime_starttime = jiffies;
         write_seqcount_end(&current->vtime_seqcount);
 }
 
@@ -791,8 +791,8 @@ void vtime_init_idle(struct task_struct *t, int cpu)
 {
         unsigned long flags;
         local_irq_save(flags);
         write_seqcount_begin(&t->vtime_seqcount);
-        t->vtime_snap_whence = VTIME_SYS;
-        t->vtime_snap = jiffies;
+        t->vtime_state = VTIME_SYS;
+        t->vtime_starttime = jiffies;
         write_seqcount_end(&t->vtime_seqcount);
         local_irq_restore(flags);
 }
@@ -809,7 +809,7 @@ u64 task_gtime(struct task_struct *t)
                 seq = read_seqcount_begin(&t->vtime_seqcount);
 
                 gtime = t->gtime;
-                if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
+                if (t->vtime_state == VTIME_SYS && t->flags & PF_VCPU)
                         gtime += vtime_delta(t);
 
         } while (read_seqcount_retry(&t->vtime_seqcount, seq));
@@ -840,7 +840,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                 *stime = t->stime;
 
                 /* Task is sleeping, nothing to add */
-                if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
+                if (t->vtime_state == VTIME_INACTIVE || is_idle_task(t))
                         continue;
 
                 delta = vtime_delta(t);
@@ -849,9 +849,9 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                  * Task runs either in user or kernel space, add pending nohz time to
                  * the right place.
                  */
-                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU)
+                if (t->vtime_state == VTIME_USER || t->flags & PF_VCPU)
                         *utime += delta;
-                else if (t->vtime_snap_whence == VTIME_SYS)
+                else if (t->vtime_state == VTIME_SYS)
                         *stime += delta;
         } while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }
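The vtime_state/vtime_starttime pair is always updated between
write_seqcount_begin() and write_seqcount_end(), and read back by task_gtime()
and task_cputime() under read_seqcount_begin()/read_seqcount_retry(). The
sketch below is a single-threaded model of that read/retry protocol, using a
hypothetical vtime_sample struct; it only illustrates the control flow and
deliberately omits the memory barriers a real concurrent seqcount needs.

#include <stdio.h>

/* Hypothetical payload guarded by a toy sequence counter. */
struct vtime_sample {
        unsigned int  seqcount;         /* even: stable, odd: update in progress */
        int           vtime_state;
        unsigned long vtime_starttime;
};

/* Writer side, shaped like arch_vtime_task_switch(). */
static void vtime_write(struct vtime_sample *s, int state, unsigned long start)
{
        s->seqcount++;                  /* begin: sequence becomes odd */
        s->vtime_state = state;
        s->vtime_starttime = start;
        s->seqcount++;                  /* end: sequence becomes even again */
}

/* Reader side, shaped like task_gtime()/task_cputime(); returns 0 if consistent. */
static int vtime_read(const struct vtime_sample *s, int *state, unsigned long *start)
{
        unsigned int seq = s->seqcount;

        if (seq & 1)                    /* writer in progress, caller must retry */
                return -1;
        *state = s->vtime_state;
        *start = s->vtime_starttime;
        return (s->seqcount == seq) ? 0 : -1;   /* changed underneath? retry */
}

int main(void)
{
        struct vtime_sample s = { 0 };
        int state;
        unsigned long start;

        vtime_write(&s, 2 /* VTIME_SYS in the patch's enum */, 1000);
        while (vtime_read(&s, &state, &start) != 0)
                ;                       /* real code: the read_seqcount_retry() loop */
        printf("state=%d starttime=%lu\n", state, start);
        return 0;
}

In the diff above, arch_vtime_task_switch() and vtime_init_idle() are the
writers of this protocol, and task_gtime()/task_cputime() are the readers that
use the sampled vtime_state to attribute the pending tickless delta to utime,
stime or gtime.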