2024-01-03 21:35:23

by Stephan Gerhold

Subject: [PATCH v2] PM: domains: Scale down parent performance states in reverse order

Power domains might have parent domains assigned that are automatically
managed by the PM domain core. In particular, parent domains are
automatically powered on/off, and performance state changes on child
domains are translated into performance state changes on the parent
domains (e.g. using an OPP table from the device tree).
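
As a rough illustration of the translation step, here is a made-up
mapping in plain C. This is not the genpd implementation (which goes
through genpd_xlate_performance_state() and the OPP tables); the table
and the state values are purely hypothetical:

/* Illustrative only: the parent state required for each child state. */
static const unsigned int parent_state_for_child[] = {
	[0] = 0,	/* child idle      -> parent may idle     */
	[1] = 1,	/* child low perf  -> parent low corner   */
	[2] = 1,	/* child mid perf  -> parent low corner   */
	[3] = 2,	/* child high perf -> parent high corner  */
};

static unsigned int xlate_child_to_parent(unsigned int child_state)
{
	return parent_state_for_child[child_state];
}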

Currently parent performance states are always adjusted before the
performance state of the child domain is changed.

However, a parent/child relationship between two power domains with
performance states typically models the requirement to keep the parent
domain at a performance state equal to or higher than that of the
child domain. With the current ordering, scaling down leaves a brief
window in which the parent domain ends up at a lower performance state
than the child domain requires.

To avoid this, we need to differentiate between scaling up and scaling
down and adjust the order of operations accordingly (see the sketch
below):

- When scaling up, parent domains should be adjusted before the child
  domain. In case of an error, the rollback happens in reverse order.

- When scaling down, parent domains should be adjusted after the child
  domain, in reverse order, just as if we were rolling back a scale-up.
  In case of an error, the rollback happens in normal order (just as
  when scaling up normally).
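
To make the intended ordering concrete, here is a minimal sketch in
plain C. The struct and the set_parent()/set_local()/rollback_parent()
helpers are hypothetical stand-ins (definitions omitted; set_parent()
is assumed to undo its own partial work on failure, like
_genpd_set_parent_state() does). Only the iteration order mirrors the
patch:

struct domain {
	unsigned int state;		/* current performance state */
	int num_parents;
};

/* Hypothetical helpers, definitions omitted. */
static int set_parent(struct domain *d, int idx, unsigned int state);
static int set_local(struct domain *d, unsigned int state);
static void rollback_parent(struct domain *d, int idx);

static int set_performance_state(struct domain *d, unsigned int state)
{
	int i = 0, ret;

	if (state == d->state)
		return 0;

	/* Scaling up: raise the parents first, in normal order. */
	if (state > d->state) {
		for (i = 0; i < d->num_parents; i++) {
			ret = set_parent(d, i, state);
			if (ret)
				goto rollback_up;
		}
	}

	/* Adjust the domain itself. */
	ret = set_local(d, state);
	if (ret)
		goto rollback_up;

	/* Scaling down: lower the parents last, in reverse order. */
	if (state < d->state) {
		for (i = d->num_parents - 1; i >= 0; i--) {
			ret = set_parent(d, i, state);
			if (ret)
				goto rollback_down;
		}
	}

	d->state = state;
	return 0;

rollback_up:
	/* Undo the parents that were already raised, in reverse order. */
	while (--i >= 0)
		rollback_parent(d, i);
	return ret;

rollback_down:
	/* Undo the parents that were already lowered, in normal order. */
	while (++i < d->num_parents)
		rollback_parent(d, i);
	return ret;
}

The net effect is that the parents never drop below what the child
still requires at any intermediate step: on the way up they are raised
first, on the way down they are lowered last.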

Implement this by moving the existing functionality of
_genpd_set_performance_state() to two separate functions that are
called in the proper iteration order.

Signed-off-by: Stephan Gerhold <[email protected]>
---
Changes in v2:
- Rebase to adjust for move of drivers/base/power/domain.c
  to drivers/pmdomain/core.c
- Regenerate CC list
- No code changes
- Link to v1: https://lore.kernel.org/r/[email protected]
---
Related discussion: https://lore.kernel.org/linux-pm/[email protected]/
---
drivers/pmdomain/core.c | 124 ++++++++++++++++++++++++++++++------------------
1 file changed, 77 insertions(+), 47 deletions(-)

diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index a1f6cba3ae6c..fec9dc6ab828 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -311,72 +311,102 @@ static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
- unsigned int state, int depth)
+ unsigned int state, int depth);
+
+static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
- struct generic_pm_domain *parent;
- struct gpd_link *link;
- int parent_state, ret;
+ struct generic_pm_domain *parent = link->parent;
+ int parent_state;

- if (state == genpd->performance_state)
- return 0;
+ genpd_lock_nested(parent, depth + 1);

- /* Propagate to parents of genpd */
- list_for_each_entry(link, &genpd->child_links, child_node) {
- parent = link->parent;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;

- /* Find parent's performance state */
- ret = genpd_xlate_performance_state(genpd, parent, state);
- if (unlikely(ret < 0))
- goto err;
+ parent_state = _genpd_reeval_performance_state(parent, parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ parent->name, parent_state);
+ }

- parent_state = ret;
+ genpd_unlock(parent);
+}

- genpd_lock_nested(parent, depth + 1);
+static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
+ struct gpd_link *link,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *parent = link->parent;
+ int parent_state, ret;

- link->prev_performance_state = link->performance_state;
- link->performance_state = parent_state;
- parent_state = _genpd_reeval_performance_state(parent,
- parent_state);
- ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
- if (ret)
- link->performance_state = link->prev_performance_state;
+ /* Find parent's performance state */
+ ret = genpd_xlate_performance_state(genpd, parent, state);
+ if (unlikely(ret < 0))
+ return ret;

- genpd_unlock(parent);
+ parent_state = ret;

- if (ret)
- goto err;
- }
+ genpd_lock_nested(parent, depth + 1);

- if (genpd->set_performance_state) {
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
- }
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = parent_state;

- genpd->performance_state = state;
- return 0;
+ parent_state = _genpd_reeval_performance_state(parent, parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;

-err:
- /* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->child_links,
- child_node) {
- parent = link->parent;
+ genpd_unlock(parent);

- genpd_lock_nested(parent, depth + 1);
+ return ret;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct gpd_link *link = NULL;
+ int ret;
+
+ if (state == genpd->performance_state)
+ return 0;

- parent_state = link->prev_performance_state;
- link->performance_state = parent_state;
+ /* When scaling up, propagate to parents first in normal order */
+ if (state > genpd->performance_state) {
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ ret = _genpd_set_parent_state(genpd, link, state, depth);
+ if (ret)
+ goto rollback_parents_up;
+ }
+ }

- parent_state = _genpd_reeval_performance_state(parent,
- parent_state);
- if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
- pr_err("%s: Failed to roll back to %d performance state\n",
- parent->name, parent_state);
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ if (link)
+ goto rollback_parents_up;
+ return ret;
}
+ }

- genpd_unlock(parent);
+ /* When scaling down, propagate to parents last in reverse order */
+ if (state < genpd->performance_state) {
+ list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
+ ret = _genpd_set_parent_state(genpd, link, state, depth);
+ if (ret)
+ goto rollback_parents_down;
+ }
}

+ genpd->performance_state = state;
+ return 0;
+
+rollback_parents_up:
+ list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
+ _genpd_rollback_parent_state(link, depth);
+ return ret;
+rollback_parents_down:
+ list_for_each_entry_continue(link, &genpd->child_links, child_node)
+ _genpd_rollback_parent_state(link, depth);
return ret;
}


---
base-commit: 0fef202ac2f8e6d9ad21aead648278f1226b9053
change-id: 20231205-genpd-perf-order-bf33029c25ac

Best regards,
--
Stephan Gerhold <[email protected]>



2024-01-17 17:07:21

by Ulf Hansson

Subject: Re: [PATCH v2] PM: domains: Scale down parent performance states in reverse order

On Wed, 3 Jan 2024 at 22:10, Stephan Gerhold <[email protected]> wrote:
>
> Power domains might have parent domains assigned that are automatically
> managed by the PM domain core. In particular, parent domains are
> automatically powered on/off and setting performance states on child
> domains is translated to parent domains (e.g. using an OPP table from
> the device tree).
>
> Currently parent performance states are always adjusted before the
> performance state of the child domain is changed.
>
> However, typically a parent/child relationship between two power
> domains with performance states models the requirement to keep the
> parent domain at a performance state equal or higher to the child
> domain. When scaling down there is a brief moment where the parent
> domain will end up having a lower performance state than required by
> the child domain.
>
> To avoid this, we need to differentiate between scaling up/down and
> adjust the order of operations:
>
> - When scaling up, parent domains should be adjusted before the child
> domain. In case of an error, the rollback happens in reverse order.
>
> - When scaling down, parent domains should be adjusted after the child
> domain, in reverse order, just as if we would rollback scaling up.
> In case of an error, the rollback happens in normal order (just as
> if we would normally scale up).
>
> Implement this by moving the existing functionality of
> _genpd_set_performance_state() to two separate functions that are
> called in the proper iteration order.
>
> Signed-off-by: Stephan Gerhold <[email protected]>

Thanks for posting this - and sorry for the delay, Stephan!

I just wanted to let you know that I am looking at this right now and
will be testing this on my end too. Allow me a few more days - I will
get back to you asap.

Kind regards
Uffe

[...]

2024-01-22 16:48:46

by Ulf Hansson

Subject: Re: [PATCH v2] PM: domains: Scale down parent performance states in reverse order

On Wed, 3 Jan 2024 at 22:10, Stephan Gerhold <[email protected]> wrote:
>
> Power domains might have parent domains assigned that are automatically
> managed by the PM domain core. In particular, parent domains are
> automatically powered on/off and setting performance states on child
> domains is translated to parent domains (e.g. using an OPP table from
> the device tree).
>
> Currently parent performance states are always adjusted before the
> performance state of the child domain is changed.
>
> However, typically a parent/child relationship between two power
> domains with performance states models the requirement to keep the
> parent domain at a performance state equal or higher to the child
> domain. When scaling down there is a brief moment where the parent
> domain will end up having a lower performance state than required by
> the child domain.
>
> To avoid this, we need to differentiate between scaling up/down and
> adjust the order of operations:
>
> - When scaling up, parent domains should be adjusted before the child
> domain. In case of an error, the rollback happens in reverse order.
>
> - When scaling down, parent domains should be adjusted after the child
> domain, in reverse order, just as if we would rollback scaling up.
> In case of an error, the rollback happens in normal order (just as
> if we would normally scale up).
>
> Implement this by moving the existing functionality of
> _genpd_set_performance_state() to two separate functions that are
> called in the proper iteration order.
>
> Signed-off-by: Stephan Gerhold <[email protected]>

I have tested this with my local set of test drivers - and things are
now working as they should. Thanks a lot for fixing this!

I have applied the patch for next, but it's probably a good idea to
add a fixes/stable tag to it and queue it as a fix instead. What do
you think?
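
For reference, such tags would look along these lines (the hash and
subject are placeholders here, not the actual offending commit):

Fixes: 123456789abc ("PM: domains: <subject of the offending commit>")
Cc: [email protected]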

Note that I took the liberty of slightly updating the commit message,
to try to make things even clearer.

Kind regards
Uffe



[...]