2023-12-17 17:32:39

by Sandipan Das

Subject: [PATCH 0/3] perf/x86/amd: Miscellaneous fixes

This series contains fixes for the use of LBR freeze, for Erratum 1452, and
for a bug in the CPU offline path where PMU-related registers are needlessly
reset.

Sandipan Das (3):
perf/x86/amd/lbr: Use freeze based on availability
perf/x86/amd/lbr: Discard erroneous branch entries
perf/x86/amd/core: Avoid register reset when CPU is dead

arch/x86/events/amd/core.c | 5 ++---
arch/x86/events/amd/lbr.c | 22 ++++++++++++++--------
arch/x86/include/asm/cpufeatures.h | 2 +-
arch/x86/kernel/cpu/scattered.c | 1 +
4 files changed, 18 insertions(+), 12 deletions(-)

--
2.34.1



2023-12-17 17:32:57

by Sandipan Das

Subject: [PATCH 1/3] perf/x86/amd/lbr: Use freeze based on availability

Currently, it is assumed that LBR Freeze is supported on all processors
which have CPUID leaf 0x80000022[EAX] bit 1 set. This is incorrect as
the feature availability is additionally dependent on CPUID leaf
0x80000022[EAX] bit 2 being set which may not be set for all Zen 4
processors. Define a new feature bit for LBR and PMC freeze and set the
freeze enable bit (FLBRI) in DebugCtl (MSR 0x1d9) conditionally.
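
For reference, the availability check can be reproduced from user space.
Below is a minimal sketch (illustrative only, not part of this patch; the
function name is made up) that queries the same CPUID bits, assuming a
GCC-style <cpuid.h>:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool amd_lbr_pmc_freeze_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0x80000022: extended performance monitoring and debug */
	if (!__get_cpuid(0x80000022, &eax, &ebx, &ecx, &edx))
		return false;

	/* EAX bit 1: LbrExtV2 available; EAX bit 2: LBR and PMC freeze available */
	return (eax & (1u << 1)) && (eax & (1u << 2));
}

int main(void)
{
	printf("LBR/PMC freeze %ssupported\n",
	       amd_lbr_pmc_freeze_supported() ? "" : "not ");
	return 0;
}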

It should still be possible to use LBR without freeze for profile-guided
optimization of user programs by using a user-only branch filter during
profiling. When the user-only filter is enabled, branches are no longer
recorded after the transition to CPL 0 upon PMI arrival, so the branch
stack does not change while branch entries are read in the PMI handler.

E.g.

$ perf record -j any,u -e ex_ret_brn_tkn ./workload

Fixes: ca5b7c0d9621 ("perf/x86/amd/lbr: Add LbrExtV2 branch record support")
Signed-off-by: Sandipan Das <[email protected]>
Cc: [email protected]
---
arch/x86/events/amd/core.c | 4 ++--
arch/x86/events/amd/lbr.c | 16 ++++++++++------
arch/x86/include/asm/cpufeatures.h | 2 +-
arch/x86/kernel/cpu/scattered.c | 1 +
4 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 4ee6390b45c9..ffdfaee08b08 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -905,8 +905,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
if (!status)
goto done;

- /* Read branch records before unfreezing */
- if (status & GLOBAL_STATUS_LBRS_FROZEN) {
+ /* Read branch records */
+ if (x86_pmu.lbr_nr) {
amd_pmu_lbr_read();
status &= ~GLOBAL_STATUS_LBRS_FROZEN;
}
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index eb31f850841a..110e34c59643 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -400,10 +400,12 @@ void amd_pmu_lbr_enable_all(void)
wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
}

- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }

- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}

@@ -416,10 +418,12 @@ void amd_pmu_lbr_disable_all(void)
return;

rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }
}

__init int amd_pmu_lbr_init(void)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4af140cf5719..9790e906d5e5 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -97,7 +97,7 @@
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE ( 3*32+18) /* "" AMD LBR and PMC Freeze */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 0dad49a09b7a..a515328d9d7d 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -49,6 +49,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+ { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
{ 0, 0, 0, 0, 0 }
};

--
2.34.1


2023-12-17 17:38:13

by Sandipan Das

Subject: [PATCH 2/3] perf/x86/amd/lbr: Discard erroneous branch entries

The Revision Guide for AMD Family 19h Models 10h-1Fh processors, found at
the link below, describes Erratum 1452, which states that non-branch
entries may erroneously be recorded in the Last Branch Record (LBR)
stack with the valid and spec bits set. Such entries can be recognized
by inspecting bit 61 of the corresponding LastBranchStackToIp register.
This bit is currently reserved; if it is found to be set, the associated
branch entry should be discarded.
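
In other words (a hedged sketch, not the patch itself; the helper name is
hypothetical), the check on a raw LastBranchStackToIp value boils down to:

#include <stdbool.h>
#include <stdint.h>

/*
 * Per erratum 1452, bit 61 of a LastBranchStackToIp value is reserved but
 * may be set for an erroneously recorded non-branch entry, in which case
 * the entry should be discarded.
 */
static bool lbr_to_is_erroneous(uint64_t lbr_to)
{
	return (lbr_to >> 61) & 1;
}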

Link: https://bugzilla.kernel.org/attachment.cgi?id=305518
Signed-off-by: Sandipan Das <[email protected]>
Cc: [email protected]
---
arch/x86/events/amd/lbr.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index 110e34c59643..43bf2dbcdb82 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -173,9 +173,11 @@ void amd_pmu_lbr_read(void)

/*
* Check if a branch has been logged; if valid = 0, spec = 0
- * then no branch was recorded
+ * then no branch was recorded; if reserved = 1 then an
+ * erroneous branch was recorded (see erratum 1452)
*/
- if (!entry.to.split.valid && !entry.to.split.spec)
+ if ((!entry.to.split.valid && !entry.to.split.spec) ||
+ entry.to.split.reserved)
continue;

perf_clear_branch_entry_bitfields(br + out);
--
2.34.1


2023-12-17 17:42:16

by Sandipan Das

Subject: [PATCH 3/3] perf/x86/amd/core: Avoid register reset when CPU is dead

When bringing a CPU online, some of the PMC- and LBR-related registers
are reset. The same is done when a CPU is taken offline, although that
is unnecessary. This currently happens in the "cpu_dead" callback, which
is also incorrect as the callback runs on a control CPU instead of the
one that is being taken offline. This also affects hibernation and
suspend to RAM on some platforms, as reported in the link below.
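
For illustration only (this is not the perf/x86 hotplug wiring and the
demo_* names are made up), the general pattern is that callbacks registered
in the AP/online section of the hotplug state machine run on the hotplugged
CPU itself, while "prepare"/"dead" callbacks run on a control CPU; per-CPU
register writes therefore belong only in the former:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int demo_pmu_cpu_online(unsigned int cpu)
{
	/* Runs on 'cpu' itself: a safe place to reset this CPU's PMU MSRs */
	return 0;
}

static int demo_pmu_cpu_offline(unsigned int cpu)
{
	/* Also runs on 'cpu', before it goes down: no register reset needed */
	return 0;
}

static int __init demo_pmu_hotplug_init(void)
{
	int ret;

	/* Dynamic AP/online state: both callbacks execute on the target CPU */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "perf/demo:online",
				demo_pmu_cpu_online, demo_pmu_cpu_offline);
	return ret < 0 ? ret : 0;
}
device_initcall(demo_pmu_hotplug_init);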

Link: https://lore.kernel.org/all/[email protected]/
Reported-by: Mario Limonciello <[email protected]>
Fixes: 21d59e3e2c40 ("perf/x86/amd/core: Detect PerfMonV2 support")
Signed-off-by: Sandipan Das <[email protected]>
Cc: [email protected]
---
arch/x86/events/amd/core.c | 1 -
1 file changed, 1 deletion(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index ffdfaee08b08..63514c311f44 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -604,7 +604,6 @@ static void amd_pmu_cpu_dead(int cpu)

kfree(cpuhw->lbr_sel);
cpuhw->lbr_sel = NULL;
- amd_pmu_cpu_reset(cpu);

if (!x86_pmu.amd_nb_constraints)
return;
--
2.34.1


2023-12-18 20:43:41

by Stephane Eranian

Subject: Re: [PATCH 1/3] perf/x86/amd/lbr: Use freeze based on availability

On Sun, Dec 17, 2023 at 9:26 AM Sandipan Das <[email protected]> wrote:
>
> Currently, it is assumed that LBR Freeze is supported on all processors
> which have CPUID leaf 0x80000022[EAX] bit 1 set. This is incorrect as
> the feature availability is additionally dependent on CPUID leaf
> 0x80000022[EAX] bit 2 being set which may not be set for all Zen 4
> processors. Define a new feature bit for LBR and PMC freeze and set the
> freeze enable bit (FLBRI) in DebugCtl (MSR 0x1d9) conditionally.


Is this new feature bit visible to users?
I think it is useful to know whether or not LBR freeze is supported.
Imagine I want to do kernel FDO; then the user-only LBR trick to freeze the
LBR does not work, and I need actual LBR freeze support.

Thanks.

[...]

2023-12-19 08:07:44

by Sandipan Das

Subject: Re: [PATCH 1/3] perf/x86/amd/lbr: Use freeze based on availability

On 12/19/2023 2:13 AM, Stephane Eranian wrote:
> On Sun, Dec 17, 2023 at 9:26 AM Sandipan Das <[email protected]> wrote:
>>
>> Currently, it is assumed that LBR Freeze is supported on all processors
>> which have CPUID leaf 0x80000022[EAX] bit 1 set. This is incorrect as
>> the feature availability is additionally dependent on CPUID leaf
>> 0x80000022[EAX] bit 2 being set which may not be set for all Zen 4
>> processors. Define a new feature bit for LBR and PMC freeze and set the
>> freeze enable bit (FLBRI) in DebugCtl (MSR 0x1d9) conditionally.
>
>
> Is this new feature bit visible to users?
> I think it is useful to know whether or not LBR freeze is supported.
> Imagine I want to do kernel FDO, then the user-only LBR trick to freeze LBR
> does not work and I need actual LBR freeze support.
>
> Thanks.
>

Agreed. Will make it a visible flag in /proc/cpuinfo.
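
A sketch of what making the flag visible could look like (assuming the usual
cpufeatures.h convention that a leading "" in the comment hides the flag from
/proc/cpuinfo; this is not the actual follow-up patch):

-#define X86_FEATURE_AMD_LBR_PMC_FREEZE ( 3*32+18) /* "" AMD LBR and PMC Freeze */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE ( 3*32+18) /* AMD LBR and PMC Freeze */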

- Sandipan

[...]