Extend walk_tg_tree to accept a positional argument
static int walk_tg_tree_from(struct task_group *from,
tg_visitor down, tg_visitor up, void *data)
Existing semantics are preserved; the caller must hold rcu_read_lock() or a
sufficient analogue.
Signed-off-by: Paul Turner <[email protected]>
---
kernel/sched.c | 34 +++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -1430,21 +1430,19 @@ static inline void dec_cpu_load(struct r
#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);
-/*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
- */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+/* Iterate task_group tree rooted at *from */
+static int walk_tg_tree_from(struct task_group *from,
+ tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
int ret;
- rcu_read_lock();
- parent = &root_task_group;
+ parent = from;
+
down:
ret = (*down)(parent, data);
if (ret)
- goto out_unlock;
+ goto out;
list_for_each_entry_rcu(child, &parent->children, siblings) {
parent = child;
goto down;
@@ -1453,14 +1451,28 @@ up:
continue;
}
ret = (*up)(parent, data);
- if (ret)
- goto out_unlock;
+ if (ret || parent == from)
+ goto out;
child = parent;
parent = parent->parent;
if (parent)
goto up;
-out_unlock:
+out:
+ return ret;
+}
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ */
+
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = walk_tg_tree_from(&root_task_group, down, up, data);
rcu_read_unlock();
return ret;
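For illustration only (not part of the patch above): a hypothetical caller of the
new entry point might look like the sketch below. tg_count_visitor() and
count_subtree_groups() are made-up names; tg_nop() is the existing no-op visitor
in kernel/sched.c, and the caller takes rcu_read_lock() itself, per the locking
rule stated in the changelog.

/* Hypothetical visitor: count each group entered on the way down. */
static int tg_count_visitor(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;	/* a non-zero return would abort the walk */
}

/* Hypothetical caller: count the groups in the subtree rooted at @tg. */
static int count_subtree_groups(struct task_group *tg)
{
	int count = 0;

	rcu_read_lock();
	walk_tg_tree_from(tg, tg_count_visitor, tg_nop, &count);
	rcu_read_unlock();

	return count;
}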
(2011/05/03 18:28), Paul Turner wrote:
> Extend walk_tg_tree to accept a positional argument
>
> static int walk_tg_tree_from(struct task_group *from,
> tg_visitor down, tg_visitor up, void *data)
>
> Existing semantics are preserved; the caller must hold rcu_read_lock() or a
> sufficient analogue.
>
> Signed-off-by: Paul Turner <[email protected]>
> ---
Reviewed-by: Hidetoshi Seto <[email protected]>
Yeah, it's nice to have.
Thanks,
H.Seto
On Tue, 2011-05-03 at 02:28 -0700, Paul Turner wrote:
> plain text document attachment (sched-bwc-refactor-walk_tg_tree.patch)
> Extend walk_tg_tree to accept a positional argument
>
> static int walk_tg_tree_from(struct task_group *from,
> tg_visitor down, tg_visitor up, void *data)
>
> Existing semantics are preserved; the caller must hold rcu_read_lock() or a
> sufficient analogue.
>
> Signed-off-by: Paul Turner <[email protected]>
> ---
> kernel/sched.c | 34 +++++++++++++++++++++++-----------
> 1 file changed, 23 insertions(+), 11 deletions(-)
>
> Index: tip/kernel/sched.c
> ===================================================================
> --- tip.orig/kernel/sched.c
> +++ tip/kernel/sched.c
> @@ -1430,21 +1430,19 @@ static inline void dec_cpu_load(struct r
> #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
> typedef int (*tg_visitor)(struct task_group *, void *);
>
> -/*
> - * Iterate the full tree, calling @down when first entering a node and @up when
> - * leaving it for the final time.
> - */
> -static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
> +/* Iterate task_group tree rooted at *from */
> +static int walk_tg_tree_from(struct task_group *from,
> + tg_visitor down, tg_visitor up, void *data)
> {
> struct task_group *parent, *child;
> int ret;
>
> - rcu_read_lock();
> - parent = &root_task_group;
> + parent = from;
> +
> down:
> ret = (*down)(parent, data);
> if (ret)
> - goto out_unlock;
> + goto out;
> list_for_each_entry_rcu(child, &parent->children, siblings) {
> parent = child;
> goto down;
> @@ -1453,14 +1451,28 @@ up:
> continue;
> }
> ret = (*up)(parent, data);
> - if (ret)
> - goto out_unlock;
> + if (ret || parent == from)
> + goto out;
>
> child = parent;
> parent = parent->parent;
> if (parent)
> goto up;
> -out_unlock:
> +out:
> + return ret;
> +}
> +
> +/*
> + * Iterate the full tree, calling @down when first entering a node and @up when
> + * leaving it for the final time.
> + */
> +
> +static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
> +{
> + int ret;
> +
> + rcu_read_lock();
> + ret = walk_tg_tree_from(&root_task_group, down, up, data);
> rcu_read_unlock();
>
> return ret;
I don't much like the different locking rules for these two functions. I
don't much care which you pick, but please make them consistent.
On Tue, May 17, 2011 at 6:31 AM, Peter Zijlstra <[email protected]> wrote:
> I don't much like the different locking rules for these two functions. I
> don't much care which you pick, but please make them consistent.
>
Reasonable; given the call sites, it would seem to make more sense to be
consistent in the direction of having the caller do the locking. Will update.
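For illustration, the direction described above might end up looking roughly
like the sketch below (a sketch only, not the actual respin): walk_tg_tree()
stops taking rcu_read_lock() itself and simply forwards to walk_tg_tree_from(),
so both functions share the rule that the caller supplies RCU protection.

/*
 * Sketch only: both walkers assume the caller already holds
 * rcu_read_lock() (or an equivalent guarantee), so walk_tg_tree()
 * reduces to a thin wrapper around walk_tg_tree_from().
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}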