From: Josh Poimboeuf
To: Seth Jennings, Jiri Kosina, Vojtech Pavlik
Cc: Masami Hiramatsu, live-patching@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 5/9] sched: move task rq locking functions to sched.h
Date: Mon, 9 Feb 2015 11:31:17 -0600
Message-Id: <3f90fabd487411e694209447cf097fb0fbcce412.1423499826.git.jpoimboe@redhat.com>

Move task_rq_lock/unlock() to sched.h so they can be used elsewhere.
The livepatch code needs to lock each task's rq in order to safely
examine its stack and switch it to a new patch universe.

Signed-off-by: Josh Poimboeuf
---
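For review context only, a minimal sketch of the kind of caller this
enables (not part of this patch; klp_check_stack() and
klp_update_task_universe() are illustrative stand-ins for the livepatch
code added later in this series, and the caller is assumed to be able to
include kernel/sched/sched.h):

#include <linux/sched.h>
#include "sched.h"	/* kernel/sched/sched.h, for task_rq_lock() */

/* Assumed helpers; names are illustrative, not part of this patch. */
extern int klp_check_stack(struct task_struct *t);
extern void klp_update_task_universe(struct task_struct *t);

static int klp_try_switch_task(struct task_struct *t)
{
	unsigned long flags;
	struct rq *rq;
	int ret;

	/* Pin t's rq so t can't start or resume running while we look. */
	rq = task_rq_lock(t, &flags);

	if (task_running(rq, t) && t != current) {
		/* Can't reliably walk the stack of a running task. */
		ret = -EBUSY;
		goto out;
	}

	/* The stack is now stable and safe to examine. */
	ret = klp_check_stack(t);
	if (!ret)
		klp_update_task_universe(t);
out:
	task_rq_unlock(rq, t, &flags);
	return ret;
}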
 kernel/sched/core.c  | 32 --------------------------------
 kernel/sched/sched.h | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b7..78d91e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -326,44 +326,12 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	}
 }
 
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	raw_spin_unlock(&rq->lock);
 }
 
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c..ae514c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1542,6 +1542,39 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 
 #endif
 
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p,
+				      unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p,
+				  unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 extern void print_cfs_stats(struct seq_file *m, int cpu);
-- 
2.1.0