From: Jason Low
To: Linus Torvalds, Peter Zijlstra, Ingo Molnar, Thomas Gleixner
Cc: linux-kernel@vger.kernel.org, "Paul E. McKenney", Andrew Morton,
    Oleg Nesterov, Mike Galbraith, Frederic Weisbecker, Mel Gorman,
    Steven Rostedt, Preeti U Murthy, hideaki.kimura@hp.com,
    Aswin Chandramouleeswaran, Scott J Norton, Jason Low
Subject: [PATCH 1/3] sched, timer: Remove usages of ACCESS_ONCE in the scheduler
Date: Tue, 14 Apr 2015 16:09:44 -0700
Message-Id: <1429052986-9420-2-git-send-email-jason.low2@hp.com>
X-Mailer: git-send-email 1.7.9.5
In-Reply-To: <1429052986-9420-1-git-send-email-jason.low2@hp.com>
References: <1429052986-9420-1-git-send-email-jason.low2@hp.com>

ACCESS_ONCE doesn't work reliably on non-scalar types. This patch
removes the remaining usages of ACCESS_ONCE in the scheduler and
converts them to the new READ_ONCE and WRITE_ONCE APIs.

Signed-off-by: Jason Low
---
 include/linux/sched.h          |    4 ++--
 kernel/fork.c                  |    2 +-
 kernel/sched/auto_group.c      |    2 +-
 kernel/sched/auto_group.h      |    2 +-
 kernel/sched/core.c            |    4 ++--
 kernel/sched/cputime.c         |    2 +-
 kernel/sched/deadline.c        |    2 +-
 kernel/sched/fair.c            |   14 +++++++-------
 kernel/sched/proc.c            |    4 ++--
 kernel/sched/rt.c              |    2 +-
 kernel/sched/sched.h           |    2 +-
 kernel/sched/wait.c            |    4 ++--
 kernel/time/posix-cpu-timers.c |    8 ++++----
 13 files changed, 26 insertions(+), 26 deletions(-)
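Most of the conversions are mechanical one-for-one replacements; the only
non-mechanical one is reset_ptenuma_scan(), where
ACCESS_ONCE(p->mm->numa_scan_seq)++ becomes an explicit lockless read
followed by a lockless write. The sketch below illustrates that pattern.
It is a simplified, userspace-compilable illustration, not code from this
patch: the READ_ONCE()/WRITE_ONCE() definitions are reduced to the plain
volatile-cast (scalar) case, and struct mm_stub / reset_scan() are made-up
stand-ins for the real mm_struct field.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel helpers: force a single, untorn
 * access through a volatile cast.  (The real READ_ONCE()/WRITE_ONCE()
 * also cope with sizes and aggregate types that ACCESS_ONCE()'s
 * whole-object volatile cast cannot handle reliably.)
 */
#define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

/* Made-up stand-in for a field that is read locklessly elsewhere. */
struct mm_stub {
        int numa_scan_seq;
};

static void reset_scan(struct mm_stub *mm)
{
        /*
         * Was:  ACCESS_ONCE(mm->numa_scan_seq)++;
         * Now:  one explicit lockless read, then one lockless write
         * of the incremented snapshot.
         */
        WRITE_ONCE(mm->numa_scan_seq, READ_ONCE(mm->numa_scan_seq) + 1);
}

int main(void)
{
        struct mm_stub mm = { .numa_scan_seq = 0 };

        reset_scan(&mm);
        printf("numa_scan_seq = %d\n", READ_ONCE(mm.numa_scan_seq));
        return 0;
}

As in the patch, the point is to make both the lockless read and the
lockless write explicit rather than hiding them behind a single
volatile-cast increment.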
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d77432..379fb3b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3070,13 +3070,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
 		unsigned int limit)
 {
-	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
 }
 
 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
 		unsigned int limit)
 {
-	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
 }
 
 static inline unsigned long rlimit(unsigned int limit)
diff --git a/kernel/fork.c b/kernel/fork.c
index cf65139..d96a0ca 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1045,7 +1045,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	/* Thread group counters. */
 	thread_group_cputime_init(sig);
 
-	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (cpu_limit != RLIM_INFINITY) {
 		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
 		sig->cputimer.running = 1;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index eae160d..a8653c2 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -141,7 +141,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
 	p->signal->autogroup = autogroup_kref_get(ag);
 
-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
 		goto out;
 
 	for_each_thread(p, t)
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd0471..890c95f 100644
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -29,7 +29,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+	int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
 
 	if (enabled && task_wants_autogroup(p, tg))
 		return p->signal->autogroup->tg;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f0f831e..4131c24 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -508,7 +508,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
 	struct thread_info *ti = task_thread_info(p);
-	typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
 	for (;;) {
 		if (!(val & _TIF_POLLING_NRFLAG))
@@ -2505,7 +2505,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
 	struct rq *rq = this_rq();
-	unsigned long next, now = ACCESS_ONCE(jiffies);
+	unsigned long next, now = READ_ONCE(jiffies);
 
 	next = rq->last_sched_tick + HZ;
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8394b1e..f5a64ff 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -567,7 +567,7 @@ static void cputime_advance(cputime_t *counter, cputime_t new)
 {
 	cputime_t old;
 
-	while (new > (old = ACCESS_ONCE(*counter)))
+	while (new > (old = READ_ONCE(*counter)))
 		cmpxchg_cputime(counter, old, new);
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3fa8fa6..fa43e3f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -932,7 +932,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
 	 * If we are dealing with a -deadline task, we must
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ce18f3..6acfda0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -832,7 +832,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
-	unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
+	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
 	unsigned int scan, floor;
 	unsigned int windows = 1;
 
@@ -1777,7 +1777,7 @@ static void task_numa_placement(struct task_struct *p)
 	u64 runtime, period;
 	spinlock_t *group_lock = NULL;
 
-	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+	seq = READ_ONCE(p->mm->numa_scan_seq);
 	if (p->numa_scan_seq == seq)
 		return;
 	p->numa_scan_seq = seq;
@@ -1921,7 +1921,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	}
 
 	rcu_read_lock();
-	tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+	tsk = READ_ONCE(cpu_rq(cpu)->curr);
 
 	if (!cpupid_match_pid(tsk, cpupid))
 		goto no_join;
@@ -2088,7 +2088,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-	ACCESS_ONCE(p->mm->numa_scan_seq)++;
+	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
 	p->mm->numa_scan_offset = 0;
 }
 
@@ -4301,7 +4301,7 @@ static unsigned long capacity_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
+	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
 	unsigned long load_avg = rq->cfs.runnable_load_avg;
 
 	if (nr_running)
@@ -5946,8 +5946,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 * Since we're reading these variables without serialization make sure
 	 * we read them once before doing sanity checks on them.
 	 */
-	age_stamp = ACCESS_ONCE(rq->age_stamp);
-	avg = ACCESS_ONCE(rq->rt_avg);
+	age_stamp = READ_ONCE(rq->age_stamp);
+	avg = READ_ONCE(rq->rt_avg);
 	delta = __rq_clock_broken(rq) - age_stamp;
 
 	if (unlikely(delta < 0))
diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
index 8ecd552..361e6f1 100644
--- a/kernel/sched/proc.c
+++ b/kernel/sched/proc.c
@@ -526,7 +526,7 @@ static inline unsigned long get_rq_runnable_load(struct rq *rq)
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	unsigned long load = get_rq_runnable_load(this_rq);
 	unsigned long pending_updates;
 
@@ -548,7 +548,7 @@ void update_idle_cpu_load(struct rq *this_rq)
 void update_cpu_load_nohz(void)
 {
 	struct rq *this_rq = this_rq();
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	unsigned long pending_updates;
 
 	if (curr_jiffies == this_rq->last_load_update_tick)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f4d4b07..7b00333 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1311,7 +1311,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
 	 * If the current task on @p's runqueue is an RT task, then
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dc0f435..949d46f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -688,7 +688,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
-	return ACCESS_ONCE(rq->clock);
+	return READ_ONCE(rq->clock);
 }
 
 static inline u64 rq_clock(struct rq *rq)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a..2ccec98 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
-	unsigned long now = ACCESS_ONCE(jiffies);
+	unsigned long now = READ_ONCE(jiffies);
 	if (signal_pending_state(current->state, current))
 		return 1;
 	if (time_after_eq(now, word->timeout))
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
-	unsigned long now = ACCESS_ONCE(jiffies);
+	unsigned long now = READ_ONCE(jiffies);
 	if (signal_pending_state(current->state, current))
 		return 1;
 	if (time_after_eq(now, word->timeout))
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 0075da7..e072d98 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -852,10 +852,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
 	if (soft != RLIM_INFINITY) {
 		unsigned long hard =
-			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -958,11 +958,11 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
 		unsigned long hard =
-			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 		cputime_t x;
 		if (psecs >= hard) {
 			/*
-- 
1.7.2.5