2023-12-14 17:00:42

by Zhipeng Lu

Subject: [PATCH] drm/amd/pm: fix a double-free in amdgpu_parse_extended_power_table

amdgpu_free_extended_power_table() is called on every error-handling
path of amdgpu_parse_extended_power_table(). However, when it returns an
error, the unwinding continues through the following call chain:

amdgpu_parse_extended_power_table
|-> kv_dpm_init / si_dpm_init
    (the only two callers of amdgpu_parse_extended_power_table)
|-> kv_dpm_sw_init / si_dpm_sw_init
    (the only callers of kv_dpm_init / si_dpm_init, respectively)
|-> kv_dpm_fini / si_dpm_fini
    (reached via goto dpm_failed in xx_dpm_sw_init)
|-> amdgpu_free_extended_power_table

So amdgpu_free_extended_power_table() is called twice along this error
path and a double-free is triggered. Similarly, the last kfree() in
amdgpu_parse_extended_power_table() also causes a double-free together
with amdgpu_free_extended_power_table() in kv_dpm_fini().
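
For illustration, a condensed sketch of that error path (function bodies
reduced to the error-handling skeleton; this is not the literal kv_dpm
code, and the si_dpm path is analogous):

static int kv_dpm_init(struct amdgpu_device *adev)
{
	int ret;

	/* ... earlier setup omitted ... */
	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		/* before this patch, the parser had already freed the
		 * extended power table on its own error path (1st free) */
		return ret;

	return 0;
}

static int kv_dpm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* ... earlier setup omitted ... */
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;

	return 0;

dpm_failed:
	kv_dpm_fini(adev); /* frees the extended power table again (2nd free) */
	return ret;
}

With this patch, amdgpu_parse_extended_power_table() simply returns on
error, and the cleanup is performed exactly once, in kv_dpm_fini() /
si_dpm_fini().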

Fixes: 84176663e70d ("drm/amd/pm: create a new holder for those APIs used only by legacy ASICs(si/kv)")
Signed-off-by: Zhipeng Lu <[email protected]>
---
.../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 52 +++++--------------
1 file changed, 13 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 81fb4e5dd804..60377747bab4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
return -ENOMEM;
- }

entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
return -ENOMEM;
- }
entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(ext_hdr->usPPMTableOffset));
adev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.ppm_table)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
le16_to_cpu(ppm->usCpuCoreNumber);
@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table)
return -ENOMEM;
- }
if (rev > 0) {
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
(mode_info->atom_context->bios + data_offset +
@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ret = amdgpu_parse_clk_voltage_dep_table(
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ if (ret)
return ret;
- }
}
}

--
2.34.1


2023-12-14 17:34:39

by Alex Deucher

Subject: Re: [PATCH] drm/amd/pm: fix a double-free in amdgpu_parse_extended_power_table

Applied. Thanks!

On Thu, Dec 14, 2023 at 12:20 PM Zhipeng Lu <[email protected]> wrote: