2022-11-07 18:46:34

by Mario Limonciello

Subject: Re: [PATCH v3 5/8] cpufreq: amd_pstate: implement amd pstate cpu online and offline callback

On 11/7/2022 11:57, Perry Yuan wrote:
> Add online and offline driver callback support so that CPU cores can go
> offline and restore their previous working state when they come back
> online later in EPP driver mode.
>
> Signed-off-by: Perry Yuan <[email protected]>

Reviewed-by: Mario Limonciello <[email protected]>

> ---
> drivers/cpufreq/amd-pstate.c | 89 ++++++++++++++++++++++++++++++++++++
> include/linux/amd-pstate.h | 1 +
> 2 files changed, 90 insertions(+)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index eb82bc6a7f66..6ce9fca0a128 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -1195,6 +1195,93 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
> return 0;
> }
>
> +static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
> +{
> + struct cppc_perf_ctrls perf_ctrls;
> + u64 value, max_perf;
> + int ret;
> +
> + ret = amd_pstate_enable(true);
> + if (ret)
> + pr_err("failed to enable amd pstate during resume, return %d\n", ret);
> +
> + value = READ_ONCE(cpudata->cppc_req_cached);
> + max_perf = READ_ONCE(cpudata->highest_perf);
> +
> + if (boot_cpu_has(X86_FEATURE_CPPC)) {
> + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> + } else {
> + perf_ctrls.max_perf = max_perf;
> + perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
> + cppc_set_perf(cpudata->cpu, &perf_ctrls);
> + }
> +}
> +
> +static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
> +{
> + struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> +
> + pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
> +
> + if (cppc_active) {
> + amd_pstate_epp_reenable(cpudata);
> + cpudata->suspended = false;
> + }
> +
> + return 0;
> +}
> +
> +static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
> +{
> + struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> + struct cppc_perf_ctrls perf_ctrls;
> + int min_perf;
> + u64 value;
> +
> + min_perf = READ_ONCE(cpudata->lowest_perf);
> + value = READ_ONCE(cpudata->cppc_req_cached);
> +
> + mutex_lock(&amd_pstate_limits_lock);
> + if (boot_cpu_has(X86_FEATURE_CPPC)) {
> + cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
> +
> + /* Set max perf same as min perf */
> + value &= ~AMD_CPPC_MAX_PERF(~0L);
> + value |= AMD_CPPC_MAX_PERF(min_perf);
> + value &= ~AMD_CPPC_MIN_PERF(~0L);
> + value |= AMD_CPPC_MIN_PERF(min_perf);
> + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> + } else {
> + perf_ctrls.desired_perf = 0;
> + perf_ctrls.max_perf = min_perf;
> + perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(AMD_CPPC_EPP_POWERSAVE);
> + cppc_set_perf(cpudata->cpu, &perf_ctrls);
> + }
> + mutex_unlock(&amd_pstate_limits_lock);
> +}
> +
> +static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
> +{
> + struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> +
> + pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
> +
> + if (cpudata->suspended)
> + return 0;
> +
> + if (cppc_active)
> + amd_pstate_epp_offline(policy);
> +
> + return 0;
> +}
> +
> +static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
> +{
> + amd_pstate_clear_update_util_hook(policy->cpu);
> +
> + return amd_pstate_cpu_offline(policy);
> +}
> +
> static void amd_pstate_verify_cpu_policy(struct amd_cpudata *cpudata,
> struct cpufreq_policy_data *policy)
> {
> @@ -1229,6 +1316,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
> .init = amd_pstate_epp_cpu_init,
> .exit = amd_pstate_epp_cpu_exit,
> .update_limits = amd_pstate_epp_update_limits,
> + .offline = amd_pstate_epp_cpu_offline,
> + .online = amd_pstate_epp_cpu_online,
> .name = "amd_pstate_epp",
> .attr = amd_pstate_epp_attr,
> };
> diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
> index 7e6e8cab97b3..c0ad7eedcae3 100644
> --- a/include/linux/amd-pstate.h
> +++ b/include/linux/amd-pstate.h
> @@ -99,6 +99,7 @@ struct amd_cpudata {
> u64 cppc_cap1_cached;
> struct update_util_data update_util;
> struct amd_aperf_mperf sample;
> + bool suspended;
> };
>
> /**
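For readers following the bit manipulation in amd_pstate_epp_offline() above: per the kernel's AMD_CPPC_* macros, the CPPC request value packs max perf in bits 7:0, min perf in bits 15:8, desired perf in bits 23:16 and the energy-performance preference in bits 31:24. Below is a minimal, self-contained userspace sketch of just the clamping step (pinning max and min to lowest_perf); the CPPC_* helpers are local stand-ins for illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define CPPC_MAX_PERF(x)   (((uint64_t)(x) & 0xff) << 0)
#define CPPC_MIN_PERF(x)   (((uint64_t)(x) & 0xff) << 8)

static uint64_t clamp_to_lowest(uint64_t cppc_req_cached, uint8_t lowest_perf)
{
	uint64_t value = cppc_req_cached;

	/* Clear the max/min perf fields, then pin both to lowest_perf. */
	value &= ~CPPC_MAX_PERF(~0ULL);
	value |= CPPC_MAX_PERF(lowest_perf);
	value &= ~CPPC_MIN_PERF(~0ULL);
	value |= CPPC_MIN_PERF(lowest_perf);

	return value;
}

int main(void)
{
	/* Example cached request: max=0xff, min=0x14, desired=0, epp=0x80 */
	uint64_t cached = 0x800014ffULL;

	printf("before: 0x%llx\n", (unsigned long long)cached);
	printf("after:  0x%llx\n",
	       (unsigned long long)clamp_to_lowest(cached, 0x14));
	return 0;
}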



2022-11-13 16:24:51

by Yuan, Perry

Subject: RE: [PATCH v3 5/8] cpufreq: amd_pstate: implement amd pstate cpu online and offline callback


> -----Original Message-----
> From: Limonciello, Mario <[email protected]>
> Sent: Tuesday, November 8, 2022 2:22 AM
> To: Yuan, Perry <[email protected]>; [email protected]; Huang,
> Ray <[email protected]>; [email protected]
> Cc: Sharma, Deepak <[email protected]>; Fontenot, Nathan
> <[email protected]>; Deucher, Alexander
> <[email protected]>; Huang, Shimmer
> <[email protected]>; Du, Xiaojian <[email protected]>; Meng,
> Li (Jassmine) <[email protected]>; [email protected]; linux-
> [email protected]
> Subject: Re: [PATCH v3 5/8] cpufreq: amd_pstate: implement amd pstate cpu
> online and offline callback
>
> On 11/7/2022 11:57, Perry Yuan wrote:
> > Add online and offline driver callback support so that CPU cores can go
> > offline and restore their previous working state when they come back
> > online later in EPP driver mode.
> >
> > Signed-off-by: Perry Yuan <[email protected]>
>
> Reviewed-by: Mario Limonciello <[email protected]>

Thank you.
I will pick up the RB tag in V4.

Perry.
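The new callbacks are exercised whenever a core is taken down and brought back through CPU hotplug. A minimal userspace sketch for driving that via the standard sysfs hotplug interface follows; run as root, and note that CPU 3 is an arbitrary example (CPU 0 is often not hot-pluggable).

#include <stdio.h>
#include <stdlib.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	/* Standard CPU hotplug control file: write 0 to offline, 1 to online. */
	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", online);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Take CPU 3 offline, then bring it back online. */
	if (set_cpu_online(3, 0))
		return EXIT_FAILURE;
	if (set_cpu_online(3, 1))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}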
