From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Tony Luck, Wanpeng Li, Peter Zijlstra,
	Michael Ellerman, Heiko Carstens, Benjamin Herrenschmidt,
	Thomas Gleixner, Paul Mackerras, Ingo Molnar, Fenghua Yu,
	Rik van Riel, Martin Schwidefsky, Stanislaw Gruszka
Subject: [PATCH 19/36] posix-timers: Convert internals to use nsecs
Date: Thu, 17 Nov 2016 19:08:26 +0100
Message-Id: <1479406123-24785-20-git-send-email-fweisbec@gmail.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1479406123-24785-1-git-send-email-fweisbec@gmail.com>
References: <1479406123-24785-1-git-send-email-fweisbec@gmail.com>

Use the new nsec based cputime accessors as part of the whole cputime
conversion from cputime_t to nsecs. Also convert posix-cpu-timers to
use nsec based internal counters, which simplifies the code.

Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman
Cc: Heiko Carstens
Cc: Martin Schwidefsky
Cc: Tony Luck
Cc: Fenghua Yu
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Stanislaw Gruszka
Cc: Wanpeng Li
Signed-off-by: Frederic Weisbecker
---
A standalone sketch of the cputime_t -> nsec mapping is appended after
the patch for reference.

 include/linux/posix-timers.h   |  12 +--
 include/linux/sched.h          |   6 +-
 kernel/fork.c                  |   2 +-
 kernel/sched/cputime.c         |   6 +-
 kernel/sched/stats.h           |   4 +-
 kernel/time/itimer.c           |   6 +-
 kernel/time/posix-cpu-timers.c | 210 +++++++++++++++++------------------------
 7 files changed, 100 insertions(+), 146 deletions(-)

diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62d44c1..890de52 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
 #include
 
-static inline unsigned long long cputime_to_expires(cputime_t expires)
-{
-	return (__force unsigned long long)expires;
-}
-
-static inline cputime_t expires_to_cputime(unsigned long long expires)
-{
-	return (__force cputime_t)expires;
-}
-
 struct cpu_timer_list {
 	struct list_head entry;
-	unsigned long long expires, incr;
+	u64 expires, incr;
 	struct task_struct *task;
 	int firing;
 };
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 394e85d..148a0a6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -753,7 +753,7 @@ struct signal_struct {
 	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
-	struct task_cputime_t cputime_expires;
+	struct task_cputime cputime_expires;
 
 #ifdef CONFIG_NO_HZ_FULL
 	atomic_t tick_dep_mask;
@@ -1675,7 +1675,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	struct task_cputime_t cputime_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -3498,7 +3498,7 @@ static __always_inline bool need_resched(void)
  * Thread group CPU time accounting.
  */
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_t(struct task_struct *tsk,
					   struct task_cputime_t *cputime)

diff --git a/kernel/fork.c b/kernel/fork.c
index 600e93b..2d36cd8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1308,7 +1308,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 
 	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (cpu_limit != RLIM_INFINITY) {
-		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
+		sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
 		sig->cputimer.running = true;
 	}
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 2e28ae2..31c457f 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 
 	/* Add user time to process. */
 	p->utime += cputime_to_nsecs(cputime);
-	account_group_user_time(p, cputime);
+	account_group_user_time(p, cputime_to_nsecs(cputime));
 
 	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
@@ -144,7 +144,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
 
 	/* Add guest time to process. */
 	p->utime += cputime_to_nsecs(cputime);
-	account_group_user_time(p, cputime);
+	account_group_user_time(p, cputime_to_nsecs(cputime));
 	p->gtime += cputime_to_nsecs(cputime);
 
 	/* Add guest time to cpustat. */
@@ -168,7 +168,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, int index)
 {
 	/* Add system time to process. */
 	p->stime += cputime_to_nsecs(cputime);
-	account_group_system_time(p, cputime);
+	account_group_system_time(p, cputime_to_nsecs(cputime));
 
 	/* Add system time to cpustat. */
 	task_group_account_field(p, index, cputime_to_nsecs(cputime));
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 34659a8..9788478 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -216,7 +216,7 @@ static inline bool cputimer_running(struct task_struct *tsk)
  * running CPU and update the utime field there.
  */
 static inline void account_group_user_time(struct task_struct *tsk,
-					   cputime_t cputime)
+					   u64 cputime)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
@@ -237,7 +237,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
  * running CPU and update the stime field there.
  */
 static inline void account_group_system_time(struct task_struct *tsk,
-					     cputime_t cputime)
+					     u64 cputime)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index bc4ba4c..20aa205 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -53,15 +53,15 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	cval = it->expires;
 	cinterval = it->incr;
 	if (cval) {
-		struct task_cputime_t cputime;
+		struct task_cputime cputime;
 		cputime_t t;
 
 		thread_group_cputimer(tsk, &cputime);
 		if (clock_id == CPUCLOCK_PROF)
-			t = cputime.utime + cputime.stime;
+			t = nsecs_to_cputime(cputime.utime + cputime.stime);
 		else
 			/* CPUCLOCK_VIRT */
-			t = cputime.utime;
+			t = nsecs_to_cputime(cputime.utime);
 
 		if (cval < t)
 			/* about to fire */
 
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 9e7a5be..a02f012 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -51,39 +51,14 @@ static int check_clock(const clockid_t which_clock)
 	return error;
 }
 
-static inline unsigned long long
-timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
-{
-	unsigned long long ret;
-
-	ret = 0;	/* high half always zero when .cpu used */
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
-	} else {
-		ret = cputime_to_expires(timespec_to_cputime(tp));
-	}
-	return ret;
-}
-
-static void sample_to_timespec(const clockid_t which_clock,
-			       unsigned long long expires,
-			       struct timespec *tp)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-		*tp = ns_to_timespec(expires);
-	else
-		cputime_to_timespec((__force cputime_t)expires, tp);
-}
-
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static void bump_cpu_timer(struct k_itimer *timer,
-			   unsigned long long now)
+static void bump_cpu_timer(struct k_itimer *timer, u64 now)
 {
 	int i;
-	unsigned long long delta, incr;
+	u64 delta, incr;
 
 	if (timer->it.cpu.incr == 0)
 		return;
@@ -116,28 +91,28 @@ static void bump_cpu_timer(struct k_itimer *timer,
  * Checks @cputime to see if all fields are zero.  Returns true if all fields
  * are zero, false if any field is nonzero.
  */
-static inline int task_cputime_zero(const struct task_cputime_t *cputime)
+static inline int task_cputime_zero(const struct task_cputime *cputime)
 {
 	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
 		return 1;
 	return 0;
 }
 
-static inline unsigned long long prof_ticks(struct task_struct *p)
+static inline u64 prof_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
-	task_cputime_t(p, &utime, &stime);
+	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime + stime);
+	return utime + stime;
 }
-static inline unsigned long long virt_ticks(struct task_struct *p)
+static inline u64 virt_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
-	task_cputime_t(p, &utime, &stime);
+	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime);
+	return utime;
 }
 
 static int
@@ -177,8 +152,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 /*
  * Sample a per-thread clock for the given task.
  */
-static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-			    unsigned long long *sample)
+static int cpu_clock_sample(const clockid_t which_clock,
+			    struct task_struct *p, u64 *sample)
 {
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
@@ -211,7 +186,7 @@ static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 	}
 }
 
-static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 {
 	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
 	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
@@ -219,7 +194,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct
 }
 
 /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
-static inline void sample_cputime_atomic(struct task_cputime_t *times,
+static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
 {
 	times->utime = atomic64_read(&atomic_times->utime);
@@ -227,10 +202,10 @@ static inline void sample_cputime_atomic(struct task_cputime_t *times,
 	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times)
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
-	struct task_cputime_t sum;
+	struct task_cputime sum;
 
 	/* Check if cputimer isn't running. This is accessed without locking. */
 	if (!READ_ONCE(cputimer->running)) {
@@ -239,7 +214,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
-		thread_group_cputime_t(tsk, &sum);
+		thread_group_cputime(tsk, &sum);
 		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
 		/*
@@ -261,23 +236,23 @@
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  unsigned long long *sample)
+				  u64 *sample)
 {
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		thread_group_cputime_t(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		thread_group_cputime(p, &cputime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		thread_group_cputime_t(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime);
+		thread_group_cputime(p, &cputime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		thread_group_cputime_t(p, &cputime);
+		thread_group_cputime(p, &cputime);
 		*sample = cputime.sum_exec_runtime;
 		break;
 	}
@@ -289,7 +264,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    struct timespec *tp)
 {
 	int err = -EINVAL;
-	unsigned long long rtn;
+	u64 rtn;
 
 	if (CPUCLOCK_PERTHREAD(which_clock)) {
 		if (same_thread_group(tsk, current))
@@ -300,7 +275,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 	}
 
 	if (!err)
-		sample_to_timespec(which_clock, rtn, tp);
+		*tp = ns_to_timespec(rtn);
 
 	return err;
 }
@@ -457,7 +432,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 	cleanup_timers(tsk->signal->cpu_timers);
 }
 
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+static inline int expires_gt(u64 expires, u64 new_exp)
 {
 	return expires == 0 || expires > new_exp;
 }
@@ -470,7 +445,7 @@ static void arm_timer(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
 	struct list_head *head, *listpos;
-	struct task_cputime_t *cputime_expires;
+	struct task_cputime *cputime_expires;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
 
@@ -492,7 +467,7 @@ static void arm_timer(struct k_itimer *timer)
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
-		unsigned long long exp = nt->expires;
+		u64 exp = nt->expires;
 
 		/*
 		 * We are the new earliest-expiring POSIX 1.b timer, hence
@@ -503,16 +478,15 @@
 
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		case CPUCLOCK_PROF:
-			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
-				cputime_expires->prof_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->prof_exp, exp))
+				cputime_expires->prof_exp = exp;
 			break;
 		case CPUCLOCK_VIRT:
-			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
-				cputime_expires->virt_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->virt_exp, exp))
+				cputime_expires->virt_exp = exp;
 			break;
 		case CPUCLOCK_SCHED:
-			if (cputime_expires->sched_exp == 0 ||
-			    cputime_expires->sched_exp > exp)
+			if (expires_gt(cputime_expires->sched_exp, exp))
 				cputime_expires->sched_exp = exp;
 			break;
 		}
@@ -563,20 +537,19 @@ static void cpu_timer_fire(struct k_itimer *timer)
  * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  unsigned long long *sample)
+				  struct task_struct *p, u64 *sample)
 {
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 
 	thread_group_cputimer(p, &cputime);
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		*sample = cputime_to_expires(cputime.utime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
 		*sample = cputime.sum_exec_runtime;
@@ -597,12 +570,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	unsigned long flags;
 	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long old_expires, new_expires, old_incr, val;
+	u64 old_expires, new_expires, old_incr, val;
 	int ret;
 
 	WARN_ON_ONCE(p == NULL);
 
-	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
+	new_expires = timespec_to_ns(&new->it_value);
 
 	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -663,9 +636,7 @@
 		bump_cpu_timer(timer, val);
 		if (val < timer->it.cpu.expires) {
 			old_expires = timer->it.cpu.expires - val;
-			sample_to_timespec(timer->it_clock,
-					   old_expires,
-					   &old->it_value);
+			old->it_value = ns_to_timespec(old_expires);
 		} else {
 			old->it_value.tv_nsec = 1;
 			old->it_value.tv_sec = 0;
@@ -703,8 +674,7 @@
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
 	 */
-	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
-						&new->it_interval);
+	timer->it.cpu.incr = timespec_to_ns(&new->it_interval);
 
 	/*
	 * This acts as a modification timestamp for the timer,
@@ -727,17 +697,15 @@
 	ret = 0;
  out:
-	if (old) {
-		sample_to_timespec(timer->it_clock,
-				   old_incr, &old->it_interval);
-	}
+	if (old)
+		old->it_interval = ns_to_timespec(old_incr);
 
 	return ret;
 }
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
-	unsigned long long now;
+	u64 now;
 	struct task_struct *p = timer->it.cpu.task;
 
 	WARN_ON_ONCE(p == NULL);
 
@@ -745,8 +713,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	/*
	 * Easy part: convert the reload time.
	 */
-	sample_to_timespec(timer->it_clock,
-			   timer->it.cpu.incr, &itp->it_interval);
+	itp->it_interval = ns_to_timespec(timer->it.cpu.incr);
 
 	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
 		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
@@ -765,7 +732,7 @@
 		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
-		 * thread_group_cputime_t().
+		 * thread_group_cputime().
		 */
 		sighand = lock_task_sighand(p, &flags);
 		if (unlikely(sighand == NULL)) {
@@ -775,8 +742,7 @@
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
-			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-					   &itp->it_value);
+			itp->it_value = ns_to_timespec(timer->it.cpu.expires);
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
		}
	}
 
 	if (now < timer->it.cpu.expires) {
-		sample_to_timespec(timer->it_clock,
-				   timer->it.cpu.expires - now,
-				   &itp->it_value);
+		itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
 	} else {
 		/*
		 * The timer should have expired already, but the firing
@@ -830,8 +794,8 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
-	struct task_cputime_t *tsk_expires = &tsk->cputime_expires;
-	unsigned long long expires;
+	struct task_cputime *tsk_expires = &tsk->cputime_expires;
+	u64 expires;
 	unsigned long soft;
 
 	/*
@@ -842,10 +806,10 @@
 		return;
 
 	expires = check_timers_list(timers, firing, prof_ticks(tsk));
-	tsk_expires->prof_exp = expires_to_cputime(expires);
+	tsk_expires->prof_exp = expires;
 
 	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
-	tsk_expires->virt_exp = expires_to_cputime(expires);
+	tsk_expires->virt_exp = expires;
 
 	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);
@@ -895,13 +859,12 @@ static inline void stop_process_timers(struct signal_struct *sig)
 }
 
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-			     unsigned long long *expires,
-			     unsigned long long cur_time, int signo)
+			     u64 *expires, u64 cur_time, int signo)
 {
 	if (!it->expires)
 		return;
 
-	if (cur_time >= it->expires) {
+	if (cur_time >= cputime_to_nsecs(it->expires)) {
 		if (it->incr) {
 			it->expires += it->incr;
 			it->error += it->incr_error;
@@ -919,8 +882,8 @@
 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 	}
 
-	if (it->expires && (!*expires || it->expires < *expires)) {
-		*expires = it->expires;
+	if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < *expires)) {
+		*expires = cputime_to_nsecs(it->expires);
 	}
 }
 
@@ -933,10 +896,10 @@ static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
 {
 	struct signal_struct *const sig = tsk->signal;
-	unsigned long long utime, ptime, virt_expires, prof_expires;
-	unsigned long long sum_sched_runtime, sched_expires;
+	u64 utime, ptime, virt_expires, prof_expires;
+	u64 sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 	unsigned long soft;
 
 	/*
@@ -956,8 +919,8 @@
	 * Collect the current process totals.
	 */
 	thread_group_cputimer(tsk, &cputime);
-	utime = cputime_to_expires(cputime.utime);
-	ptime = utime + cputime_to_expires(cputime.stime);
+	utime = cputime.utime;
+	ptime = utime + cputime.stime;
 	sum_sched_runtime = cputime.sum_exec_runtime;
 
 	prof_expires = check_timers_list(timers, firing, ptime);
@@ -973,10 +936,10 @@
 			 SIGVTALRM);
 
 	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (soft != RLIM_INFINITY) {
-		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
 		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
-		cputime_t x;
+		u64 x;
 		if (psecs >= hard) {
 			/*
			 * At the hard limit, we just die.
@@ -995,14 +958,13 @@
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
 		}
-		x = secs_to_cputime(soft);
-		if (!prof_expires || x < prof_expires) {
+		x = soft * NSEC_PER_SEC;
+		if (!prof_expires || x < prof_expires)
 			prof_expires = x;
-		}
 	}
 
-	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
-	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
+	sig->cputime_expires.prof_exp = prof_expires;
+	sig->cputime_expires.virt_exp = virt_expires;
 	sig->cputime_expires.sched_exp = sched_expires;
 	if (task_cputime_zero(&sig->cputime_expires))
 		stop_process_timers(sig);
@@ -1019,7 +981,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	struct sighand_struct *sighand;
 	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long now;
+	u64 now;
 
 	WARN_ON_ONCE(p == NULL);
 
@@ -1039,7 +1001,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	} else {
 		/*
		 * Protect arm_timer() and timer sampling in case of call to
-		 * thread_group_cputime_t().
+		 * thread_group_cputime().
		 */
 		sighand = lock_task_sighand(p, &flags);
 		if (unlikely(sighand == NULL)) {
@@ -1082,8 +1044,8 @@
  * Returns true if any field of the former is greater than the corresponding
  * field of the latter if the latter field is set.  Otherwise returns false.
  */
-static inline int task_cputime_expired(const struct task_cputime_t *sample,
-				       const struct task_cputime_t *expires)
+static inline int task_cputime_expired(const struct task_cputime *sample,
+				       const struct task_cputime *expires)
 {
 	if (expires->utime && sample->utime >= expires->utime)
 		return 1;
@@ -1110,9 +1072,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	struct signal_struct *sig;
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
-		struct task_cputime_t task_sample;
+		struct task_cputime task_sample;
 
-		task_cputime_t(tsk, &task_sample.utime, &task_sample.stime);
+		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
 		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
 			return 1;
@@ -1135,7 +1097,7 @@
	 */
 	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
-		struct task_cputime_t group_sample;
+		struct task_cputime group_sample;
 
 		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
@@ -1218,7 +1180,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
 {
-	unsigned long long now;
+	u64 now, new;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1230,31 +1192,33 @@
		 * it to be absolute.
		 */
 		if (*oldval) {
-			if (*oldval <= now) {
+			if (cputime_to_nsecs(*oldval) <= now) {
 				/* Just about to fire. */
 				*oldval = cputime_one_jiffy;
 			} else {
-				*oldval -= now;
+				*oldval -= nsecs_to_cputime(now);
 			}
 		}
 
 		if (!*newval)
 			return;
-		*newval += now;
+		*newval += nsecs_to_cputime(now);
 	}
 
+	new = cputime_to_nsecs(*newval);
+
 	/*
	 * Update expiration cache if we are the earliest timer, or eventually
	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
	 */
 	switch (clock_idx) {
 	case CPUCLOCK_PROF:
-		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
-			tsk->signal->cputime_expires.prof_exp = *newval;
+		if (expires_gt(tsk->signal->cputime_expires.prof_exp, new))
+			tsk->signal->cputime_expires.prof_exp = new;
 		break;
 	case CPUCLOCK_VIRT:
-		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
-			tsk->signal->cputime_expires.virt_exp = *newval;
+		if (expires_gt(tsk->signal->cputime_expires.virt_exp, new))
+			tsk->signal->cputime_expires.virt_exp = new;
 		break;
 	}
@@ -1312,7 +1276,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		/*
		 * We were interrupted by a signal.
		 */
-		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+		*rqtp = ns_to_timespec(timer.it.cpu.expires);
 		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
 		if (!error) {
 			/*
-- 
2.7.4
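
For reference, here is the promised standalone sketch of the
cputime_t -> nsec mapping the conversion builds on. It is a minimal
userspace sketch, assuming a jiffies-backed cputime_t with HZ=100;
cputime_to_nsecs_sketch() mirrors the idea of the kernel's
cputime_to_nsecs() but is illustrative only, not the kernel's
definition:

/*
 * Illustrative userspace sketch (not kernel code): how a tick based
 * cputime_t maps onto the u64 nanosecond values used in the patch.
 * Assumptions: jiffies backed cputime_t, HZ=100; names hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ		100			/* assumed tick rate */
#define NSEC_PER_SEC	1000000000ULL

typedef uint64_t cputime_t;			/* one unit == one tick here */

/* Mirrors the idea of cputime_to_nsecs() for a jiffies backed cputime_t. */
static uint64_t cputime_to_nsecs_sketch(cputime_t ct)
{
	return ct * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	cputime_t ticks = 150;			/* 1.5s of accumulated CPU time */
	uint64_t sample = cputime_to_nsecs_sketch(ticks);
	uint64_t prof_exp = 2 * NSEC_PER_SEC;	/* e.g. a 2s RLIMIT_CPU soft limit */

	/* With both values in nsecs, the expiry check is a plain compare. */
	printf("sample=%llu ns expired=%d\n",
	       (unsigned long long)sample, sample >= prof_exp);
	return 0;
}

Once both the samples and the expiration caches are plain u64
nanoseconds like this, the cputime_to_expires()/expires_to_cputime()
round trips removed above become unnecessary and every expiry check
reduces to an integer comparison.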