This patch checks if we can set the rt_runtime_us to 0. If there is a
realtime task in the group, we don't want to set the rt_runtime_us to 0
otherwise bad things will happen.
Signed-off-by: Dhaval Giani <[email protected]>
---
kernel/sched.c | 19 +++++++++++++++++++
1 files changed, 19 insertions(+)
Index: linux-2.6.25-rc3/kernel/sched.c
===================================================================
--- linux-2.6.25-rc3.orig/kernel/sched.c
+++ linux-2.6.25-rc3/kernel/sched.c
@@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
return total + to_ratio(period, runtime) < global_ratio;
}
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+ struct task_struct *p;
+ for_each_process(p) {
+ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+ return 1;
+ }
+ return 0;
+}
+
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
@@ -7971,6 +7982,13 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = rt_period;
mutex_lock(&rt_constraints_mutex);
+ read_lock(&tasklist_lock);
+ if (rt_runtime_us == 0) {
+ if (tg_has_rt_tasks(tg)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ }
if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
err = -EINVAL;
goto unlock;
@@ -7979,6 +7997,7 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = RUNTIME_INF;
tg->rt_runtime = rt_runtime;
unlock:
+ read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
--
regards,
Dhaval
On Wed, 2008-02-27 at 16:38 +0530, Dhaval Giani wrote:
> This patch checks if we can set the rt_runtime_us to 0. If there is a
> realtime task in the group, we don't want to set the rt_runtime_us to 0
> otherwise bad things will happen.
I had considered this a: don't do that then, thing. But sure, helping
the admin seems like a valid option.
> Signed-off-by: Dhaval Giani <[email protected]>
> ---
> kernel/sched.c | 19 +++++++++++++++++++
> 1 files changed, 19 insertions(+)
>
> Index: linux-2.6.25-rc3/kernel/sched.c
> ===================================================================
> --- linux-2.6.25-rc3.orig/kernel/sched.c
> +++ linux-2.6.25-rc3/kernel/sched.c
> @@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
> return total + to_ratio(period, runtime) < global_ratio;
> }
>
> +/* Must be called with tasklist_lock held */
> +static inline int tg_has_rt_tasks(struct task_group *tg)
> +{
> + struct task_struct *p;
> + for_each_process(p) {
> + if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
> + return 1;
> + }
> + return 0;
> +}
> +
> int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
> {
> u64 rt_runtime, rt_period;
> @@ -7971,6 +7982,13 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = rt_period;
>
> mutex_lock(&rt_constraints_mutex);
> + read_lock(&tasklist_lock);
> + if (rt_runtime_us == 0) {
> + if (tg_has_rt_tasks(tg)) {
> (these two ifs could be folded into a single condition with &&)
> + err = -EINVAL;
-EBUSY perhaps?
> + goto unlock;
> + }
> + }
> if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
> err = -EINVAL;
> goto unlock;
> @@ -7979,6 +7997,7 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = RUNTIME_INF;
> tg->rt_runtime = rt_runtime;
> unlock:
> + read_unlock(&tasklist_lock);
> mutex_unlock(&rt_constraints_mutex);
>
> return err;
On Wed, Feb 27, 2008 at 01:46:56PM +0100, Peter Zijlstra wrote:
>
> On Wed, 2008-02-27 at 16:38 +0530, Dhaval Giani wrote:
> > This patch checks if we can set the rt_runtime_us to 0. If there is a
> > realtime task in the group, we don't want to set the rt_runtime_us to 0
> > otherwise bad things will happen.
>
> I had considered this a: don't do that then, thing. But sure, helping
> the admin seems like a valid option.
>
Well, if it can be done, it shall be done :)
<snip>
>
> > + err = -EINVAL;
>
> -EBUSY perhaps?
>
Well, the group is certainly not busy. I will go with invalid as it is
definitely an invalid value when the groups have rt tasks. But if someone
disagrees, I have no issue with it being something else.
Corrected patch as follows,
--
This patch checks if we can set the rt_runtime_us to 0. If there is a
realtime task in the group, we don't want to set the rt_runtime_us as 0
or bad things will happen.
Signed-off-by: Dhaval Giani <[email protected]>
---
kernel/sched.c | 17 +++++++++++++++++
1 files changed, 17 insertions(+)
Index: linux-2.6.25-rc3/kernel/sched.c
===================================================================
--- linux-2.6.25-rc3.orig/kernel/sched.c
+++ linux-2.6.25-rc3/kernel/sched.c
@@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
return total + to_ratio(period, runtime) < global_ratio;
}
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+ struct task_struct *p;
+ for_each_process(p) {
+ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+ return 1;
+ }
+ return 0;
+}
+
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
@@ -7971,6 +7982,11 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = rt_period;
mutex_lock(&rt_constraints_mutex);
+ read_lock(&tasklist_lock);
+ if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+ err = -EINVAL;
+ goto unlock;
+ }
if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
err = -EINVAL;
goto unlock;
@@ -7979,6 +7995,7 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = RUNTIME_INF;
tg->rt_runtime = rt_runtime;
unlock:
+ read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
--
regards,
Dhaval
On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> Well, if it can be done, it shall be done :)
May I suggest: rm -rf /
/me runs
On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> This patch checks if we can set the rt_runtime_us to 0. If there is a
> realtime task in the group, we don't want to set the rt_runtime_us as 0
> or bad things will happen.
>
> Signed-off-by: Dhaval Giani <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
> ---
> kernel/sched.c | 17 +++++++++++++++++
> 1 files changed, 17 insertions(+)
>
> Index: linux-2.6.25-rc3/kernel/sched.c
> ===================================================================
> --- linux-2.6.25-rc3.orig/kernel/sched.c
> +++ linux-2.6.25-rc3/kernel/sched.c
> @@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
> return total + to_ratio(period, runtime) < global_ratio;
> }
>
> +/* Must be called with tasklist_lock held */
> +static inline int tg_has_rt_tasks(struct task_group *tg)
> +{
> + struct task_struct *p;
> + for_each_process(p) {
> + if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
> + return 1;
> + }
> + return 0;
> +}
> +
> int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
> {
> u64 rt_runtime, rt_period;
> @@ -7971,6 +7982,11 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = rt_period;
>
> mutex_lock(&rt_constraints_mutex);
> + read_lock(&tasklist_lock);
> + if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
> + err = -EINVAL;
> + goto unlock;
> + }
> if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
> err = -EINVAL;
> goto unlock;
> @@ -7979,6 +7995,7 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = RUNTIME_INF;
> tg->rt_runtime = rt_runtime;
> unlock:
> + read_unlock(&tasklist_lock);
> mutex_unlock(&rt_constraints_mutex);
>
> return err;
* Peter Zijlstra <[email protected]> wrote:
>
> On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
>
> > This patch checks if we can set the rt_runtime_us to 0. If there is a
> > realtime task in the group, we don't want to set the rt_runtime_us as 0
> > or bad things will happen.
> >
> > Signed-off-by: Dhaval Giani <[email protected]>
>
> Acked-by: Peter Zijlstra <[email protected]>
doesnt apply to sched-devel.git - what's the plan here?
Ingo
On Wed, 2008-02-27 at 16:03 +0100, Ingo Molnar wrote:
> * Peter Zijlstra <[email protected]> wrote:
>
> >
> > On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> >
> > > This patch checks if we can set the rt_runtime_us to 0. If there is a
> > > realtime task in the group, we don't want to set the rt_runtime_us as 0
> > > or bad things will happen.
> > >
> > > Signed-off-by: Dhaval Giani <[email protected]>
> >
> > Acked-by: Peter Zijlstra <[email protected]>
>
> doesnt apply to sched-devel.git - what's the plan here?
stick in front, drop the other two sched-rt-group patches for which I'll
send you an update somewhere today.
These two patches would be candidates to push to linus..
On Wed, Feb 27, 2008 at 04:03:50PM +0100, Ingo Molnar wrote:
>
> * Peter Zijlstra <[email protected]> wrote:
>
> >
> > On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> >
> > > This patch checks if we can set the rt_runtime_us to 0. If there is a
> > > realtime task in the group, we don't want to set the rt_runtime_us as 0
> > > or bad things will happen.
> > >
> > > Signed-off-by: Dhaval Giani <[email protected]>
> >
> > Acked-by: Peter Zijlstra <[email protected]>
>
> doesnt apply to sched-devel.git - what's the plan here?
>
Belongs to mainline.
--
regards,
Dhaval
On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> This patch checks if we can set the rt_runtime_us to 0. If there is a
> realtime task in the group, we don't want to set the rt_runtime_us as 0
> or bad things will happen.
>
> Signed-off-by: Dhaval Giani <[email protected]>
> ---
> kernel/sched.c | 17 +++++++++++++++++
> 1 files changed, 17 insertions(+)
>
> Index: linux-2.6.25-rc3/kernel/sched.c
> ===================================================================
> --- linux-2.6.25-rc3.orig/kernel/sched.c
> +++ linux-2.6.25-rc3/kernel/sched.c
> @@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
> return total + to_ratio(period, runtime) < global_ratio;
> }
>
> +/* Must be called with tasklist_lock held */
> +static inline int tg_has_rt_tasks(struct task_group *tg)
> +{
> + struct task_struct *p;
> + for_each_process(p) {
> + if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
> + return 1;
> + }
This should be a do_each_thread() { } while_each_thread(); iteration
loop;
cgroups are thread oriented, not task.
Sorry for missing this earlier.
> + return 0;
> +}
> +
> int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
> {
> u64 rt_runtime, rt_period;
> @@ -7971,6 +7982,11 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = rt_period;
>
> mutex_lock(&rt_constraints_mutex);
> + read_lock(&tasklist_lock);
> + if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
> + err = -EINVAL;
> + goto unlock;
> + }
> if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
> err = -EINVAL;
> goto unlock;
> @@ -7979,6 +7995,7 @@ int sched_group_set_rt_runtime(struct ta
> rt_runtime = RUNTIME_INF;
> tg->rt_runtime = rt_runtime;
> unlock:
> + read_unlock(&tasklist_lock);
> mutex_unlock(&rt_constraints_mutex);
>
> return err;
On Wed, Feb 27, 2008 at 10:47:19PM +0100, Peter Zijlstra wrote:
>
> On Wed, 2008-02-27 at 18:54 +0530, Dhaval Giani wrote:
> > + struct task_struct *p;
> > + for_each_process(p) {
> > + if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
> > + return 1;
> > + }
>
> This should be a do_each_thread() { } while_each_thread(); iteration
> loop;
>
> cgroups are thread oriented, not task.
>
> Sorry for missing this earlier.
>
Nice catch! Sorry for the mistake, corrected patch follows.
---
This patch checks if we can set the rt_runtime_us to 0. If there is a
realtime task in the group, we don't want to set the rt_runtime_us as 0
or bad things will happen.
Signed-off-by: Dhaval Giani <[email protected]>
---
kernel/sched.c | 17 +++++++++++++++++
1 files changed, 17 insertions(+)
Index: linux-2.6.25-rc3/kernel/sched.c
===================================================================
--- linux-2.6.25-rc3.orig/kernel/sched.c
+++ linux-2.6.25-rc3/kernel/sched.c
@@ -7960,6 +7960,17 @@ static int __rt_schedulable(struct task_
return total + to_ratio(period, runtime) < global_ratio;
}
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+ struct task_struct *g, *p;
+ do_each_thread(g, p) {
+ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+ return 1;
+ } while_each_thread(g, p);
+ return 0;
+}
+
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
@@ -7971,6 +7982,11 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = rt_period;
mutex_lock(&rt_constraints_mutex);
+ read_lock(&tasklist_lock);
+ if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+ err = -EINVAL;
+ goto unlock;
+ }
if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
err = -EINVAL;
goto unlock;
@@ -7979,6 +7995,7 @@ int sched_group_set_rt_runtime(struct ta
rt_runtime = RUNTIME_INF;
tg->rt_runtime = rt_runtime;
unlock:
+ read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
--
regards,
Dhaval