From: Stephane Eranian <[email protected]>
In AMD PQoS Versions 1.0 and 2.0, the IA32_QM_EVTSEL MSR is shared by
all processors in a QOS domain, so a CPU can read the count for a
different event than the one it just selected when two CPUs program
and read the counter concurrently. Add a per-domain spinlock to
prevent this race.
Co-developed-by: Peter Newman <[email protected]>
Signed-off-by: Peter Newman <[email protected]>
Signed-off-by: Stephane Eranian <[email protected]>
---
arch/x86/kernel/cpu/resctrl/core.c | 41 ++++++++++++++++++++++++++
arch/x86/kernel/cpu/resctrl/internal.h | 5 ++++
arch/x86/kernel/cpu/resctrl/monitor.c | 14 +++++++--
3 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 030d3b409768..47b1c37a81f8 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -25,6 +25,8 @@
#include <asm/resctrl.h>
#include "internal.h"
+DEFINE_STATIC_KEY_FALSE(rmid_read_locked);
+
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);
@@ -529,6 +531,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
d->id = id;
cpumask_set_cpu(cpu, &d->cpu_mask);
+ raw_spin_lock_init(&hw_dom->evtsel_lock);
+
rdt_domain_reconfigure_cdp(r);
if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
@@ -829,6 +833,41 @@ static __init bool get_rdt_mon_resources(void)
return !rdt_get_mon_l3_config(r);
}
+static __init bool amd_shared_qm_evtsel(void)
+{
+ /*
+ * From AMD64 Technology Platform Quality of Service Extensions,
+ * Revision 1.03:
+ *
+ * "For PQoS Version 1.0 and 2.0, as identified by Family/Model, the
+ * QM_EVTSEL register is shared by all the processors in a QOS domain."
+ *
+ * Check the inclusive Family/Model ranges for PQoS Extension versions
+ * 1.0 and 2.0 from the PQoS Extension Versions table.
+ */
+ if (boot_cpu_data.x86 == 0x17)
+ /* V1.0 */
+ return boot_cpu_data.x86_model >= 0x30 &&
+ boot_cpu_data.x86_model <= 0x9f;
+
+ if (boot_cpu_data.x86 == 0x19)
+ /* V2.0 */
+ return (boot_cpu_data.x86_model <= 0xf) ||
+ ((boot_cpu_data.x86_model >= 0x20) &&
+ (boot_cpu_data.x86_model <= 0x5f));
+
+ return false;
+}
+
+static __init void __check_quirks_amd(void)
+{
+ if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
+ rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
+ if (amd_shared_qm_evtsel())
+ static_branch_enable(&rmid_read_locked);
+ }
+}
+
static __init void __check_quirks_intel(void)
{
switch (boot_cpu_data.x86_model) {
@@ -852,6 +891,8 @@ static __init void check_quirks(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
__check_quirks_intel();
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ __check_quirks_amd();
}
static __init bool get_rdt_resources(void)
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 85ceaf9a31ac..02a062558c67 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -325,6 +325,7 @@ struct arch_mbm_state {
* @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
* @arch_mbm_total: arch private state for MBM total bandwidth
* @arch_mbm_local: arch private state for MBM local bandwidth
+ * @lock: serializes counter reads when QM_EVTSEL MSR is shared per-domain
*
* Members of this structure are accessed via helpers that provide abstraction.
*/
@@ -333,6 +334,7 @@ struct rdt_hw_domain {
u32 *ctrl_val;
struct arch_mbm_state *arch_mbm_total;
struct arch_mbm_state *arch_mbm_local;
+ raw_spinlock_t evtsel_lock;
};
static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
@@ -428,6 +430,9 @@ extern struct rdt_hw_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+/* Serialization required in resctrl_arch_rmid_read(). */
+DECLARE_STATIC_KEY_FALSE(rmid_read_locked);
+
extern struct dentry *debugfs_resctrl;
enum resctrl_res_level {
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 20952419be75..2de8397f91cd 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -146,10 +146,15 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
return entry;
}
-static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+static int __rmid_read(struct rdt_hw_domain *hw_dom, u32 rmid,
+ enum resctrl_event_id eventid, u64 *val)
{
+ unsigned long flags;
u64 msr_val;
+ if (static_branch_likely(&rmid_read_locked))
+ raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);
+
/*
* As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
* with a valid event code for supported resource type and the bits
@@ -161,6 +166,9 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
rdmsrl(MSR_IA32_QM_CTR, msr_val);
+ if (static_branch_likely(&rmid_read_locked))
+ raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);
+
if (msr_val & RMID_VAL_ERROR)
return -EIO;
if (msr_val & RMID_VAL_UNAVAIL)
@@ -200,7 +208,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
memset(am, 0, sizeof(*am));
/* Record any initial, non-zero count value. */
- __rmid_read(rmid, eventid, &am->prev_msr);
+ __rmid_read(hw_dom, rmid, eventid, &am->prev_msr);
}
}
@@ -241,7 +249,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
return -EINVAL;
- ret = __rmid_read(rmid, eventid, &msr_val);
+ ret = __rmid_read(hw_dom, rmid, eventid, &msr_val);
if (ret)
return ret;
--
2.40.0.634.g4ca3ef3211-goog
Hi Peter,
On 4/21/2023 7:17 AM, Peter Newman wrote:
> From: Stephane Eranian <[email protected]>
>
> In AMD PQoS Versions 1.0 and 2.0, the IA32_QM_EVTSEL MSR is shared by
> all processors in a QOS domain, so a CPU can read the count for a
> different event than the one it just selected when two CPUs program
> and read the counter concurrently. Add a per-domain spinlock to
> prevent this race.
This is unclear to me. As I understand it, this changelog is written as
though there is a race that is being fixed. I believe that rdtgroup_mutex
currently protects against such races. I thus at first thought that
this was a prep patch for the introduction of the new soft RMID feature,
but instead this new spinlock is used independently of the soft RMID
feature. I think the spinlock is unnecessary when the soft RMID feature
is disabled.
> Co-developed-by: Peter Newman <[email protected]>
> Signed-off-by: Peter Newman <[email protected]>
> Signed-off-by: Stephane Eranian <[email protected]>
> ---
> arch/x86/kernel/cpu/resctrl/core.c | 41 ++++++++++++++++++++++++++
> arch/x86/kernel/cpu/resctrl/internal.h | 5 ++++
> arch/x86/kernel/cpu/resctrl/monitor.c | 14 +++++++--
> 3 files changed, 57 insertions(+), 3 deletions(-)
>
...
> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
> index 85ceaf9a31ac..02a062558c67 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -325,6 +325,7 @@ struct arch_mbm_state {
> * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
> * @arch_mbm_total: arch private state for MBM total bandwidth
> * @arch_mbm_local: arch private state for MBM local bandwidth
> + * @lock: serializes counter reads when QM_EVTSEL MSR is shared per-domain
> *
> * Members of this structure are accessed via helpers that provide abstraction.
> */
> @@ -333,6 +334,7 @@ struct rdt_hw_domain {
> u32 *ctrl_val;
> struct arch_mbm_state *arch_mbm_total;
> struct arch_mbm_state *arch_mbm_local;
> + raw_spinlock_t evtsel_lock;
> };
Please note the difference between the member name in the struct ("evtsel_lock")
and its description ("lock").
>
> static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
> @@ -428,6 +430,9 @@ extern struct rdt_hw_resource rdt_resources_all[];
> extern struct rdtgroup rdtgroup_default;
> DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
>
> +/* Serialization required in resctrl_arch_rmid_read(). */
> +DECLARE_STATIC_KEY_FALSE(rmid_read_locked);
> +
> extern struct dentry *debugfs_resctrl;
>
> enum resctrl_res_level {
> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
> index 20952419be75..2de8397f91cd 100644
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -146,10 +146,15 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
> return entry;
> }
>
> -static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
> +static int __rmid_read(struct rdt_hw_domain *hw_dom, u32 rmid,
> + enum resctrl_event_id eventid, u64 *val)
> {
> + unsigned long flags;
> u64 msr_val;
>
> + if (static_branch_likely(&rmid_read_locked))
Why static_branch_likely() as opposed to static_branch_unlikely()?
> + raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);
> +
> /*
> * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
> * with a valid event code for supported resource type and the bits
> @@ -161,6 +166,9 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
> wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
> rdmsrl(MSR_IA32_QM_CTR, msr_val);
>
> + if (static_branch_likely(&rmid_read_locked))
> + raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);
> +
If the first "if (static_branch_likely(&rmid_read_locked))" was taken then the second
if branch _has_ to be taken. It should not be optional to release a lock if it was taken. I
think it would be more robust if a single test of the static key decides whether the
spinlock should be used.
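Something like this untested sketch, which reads the key once (and uses
static_branch_unlikely() per my question above):

static int __rmid_read(struct rdt_hw_domain *hw_dom, u32 rmid,
		       enum resctrl_event_id eventid, u64 *val)
{
	bool lock = static_branch_unlikely(&rmid_read_locked);
	unsigned long flags;
	u64 msr_val;

	/* Lock and unlock decisions can no longer diverge. */
	if (lock)
		raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);

	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
	rdmsrl(MSR_IA32_QM_CTR, msr_val);

	if (lock)
		raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);

	if (msr_val & RMID_VAL_ERROR)
		return -EIO;
	if (msr_val & RMID_VAL_UNAVAIL)
		return -EINVAL;

	*val = msr_val;
	return 0;
}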
> if (msr_val & RMID_VAL_ERROR)
> return -EIO;
> if (msr_val & RMID_VAL_UNAVAIL)
> @@ -200,7 +208,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
> memset(am, 0, sizeof(*am));
>
> /* Record any initial, non-zero count value. */
> - __rmid_read(rmid, eventid, &am->prev_msr);
> + __rmid_read(hw_dom, rmid, eventid, &am->prev_msr);
> }
> }
>
> @@ -241,7 +249,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
> if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
> return -EINVAL;
>
> - ret = __rmid_read(rmid, eventid, &msr_val);
> + ret = __rmid_read(hw_dom, rmid, eventid, &msr_val);
> if (ret)
> return ret;
>
Reinette
Hi Reinette,
On Thu, May 11, 2023 at 11:36 PM Reinette Chatre
<[email protected]> wrote:
> On 4/21/2023 7:17 AM, Peter Newman wrote:
> > From: Stephane Eranian <[email protected]>
> >
> > In AMD PQoS Versions 1.0 and 2.0, the IA32_QM_EVTSEL MSR is shared by
> > all processors in a QOS domain, so a CPU can read the count for a
> > different event than the one it just selected when two CPUs program
> > and read the counter concurrently. Add a per-domain spinlock to
> > prevent this race.
>
> This is unclear to me. As I understand it, this changelog is written as
> though there is a race that is being fixed. I believe that rdtgroup_mutex
> currently protects against such races. I thus at first thought that
> this was a prep patch for the introduction of the new soft RMID feature,
> but instead this new spinlock is used independently of the soft RMID
> feature.
>
> I think the spinlock is unnecessary when the soft RMID feature is disabled.
My understanding was that the race would happen a lot more when
simultaneously IPI'ing all CPUs in a domain, but I had apparently
overlooked that all of the counter reads were already serialized.
> > + * @lock: serializes counter reads when QM_EVTSEL MSR is shared per-domain
> > *
> > * Members of this structure are accessed via helpers that provide abstraction.
> > */
> > @@ -333,6 +334,7 @@ struct rdt_hw_domain {
> > u32 *ctrl_val;
> > struct arch_mbm_state *arch_mbm_total;
> > struct arch_mbm_state *arch_mbm_local;
> > + raw_spinlock_t evtsel_lock;
> > };
>
> Please note the difference between the member name in the struct ("evtsel_lock")
> and its description ("lock").
Will fix, thanks.
> > -static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
> > +static int __rmid_read(struct rdt_hw_domain *hw_dom, u32 rmid,
> > + enum resctrl_event_id eventid, u64 *val)
> > {
> > + unsigned long flags;
> > u64 msr_val;
> >
> > + if (static_branch_likely(&rmid_read_locked))
>
> Why static_branch_likely() as opposed to static_branch_unlikely()?
I read the documentation for static branches and I agree that unlikely
would make more sense here, so that the non-locked case is less
impacted. This instance shows that I had misunderstood static branches,
and I will need to revisit all uses of them in this patch series.
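For reference, my current understanding of the intended usage, as a
sketch using the names from this patch:

	/*
	 * With static_branch_unlikely(), the patched-out fallthrough
	 * path is the unlocked one, so when the key is disabled the
	 * check costs only a NOP; enabling the key patches in a jump
	 * to the out-of-line locked path.
	 */
	if (static_branch_unlikely(&rmid_read_locked))
		raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);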
>
> > + raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);
> > +
> > /*
> > * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
> > * with a valid event code for supported resource type and the bits
> > @@ -161,6 +166,9 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
> > wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
> > rdmsrl(MSR_IA32_QM_CTR, msr_val);
> >
> > + if (static_branch_likely(&rmid_read_locked))
> > + raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);
> > +
>
> If the first "if (static_branch_likely(&rmid_read_locked))" was taken then the second
> if branch _has_ to be taken. It should not be optional to release a lock if it was taken. I
> think it would be more robust if a single test of the static key decides whether the
> spinlock should be used.
Is the concern that the branch value could change concurrently and
result in a deadlock?

I'm curious as to whether this case is performance critical enough to
justify using a static branch. It's clear that we should be using them
in the context switch path, but I'm unsure about other places where
they're used even though a plain flag in memory would do.
-Peter
Hi Peter,
On 5/12/2023 6:23 AM, Peter Newman wrote:
> Hi Reinette,
>
> On Thu, May 11, 2023 at 11:36 PM Reinette Chatre
> <[email protected]> wrote:
>> On 4/21/2023 7:17 AM, Peter Newman wrote:
>>> + raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);
>>> +
>>> /*
>>> * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
>>> * with a valid event code for supported resource type and the bits
>>> @@ -161,6 +166,9 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
>>> wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
>>> rdmsrl(MSR_IA32_QM_CTR, msr_val);
>>>
>>> + if (static_branch_likely(&rmid_read_locked))
>>> + raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);
>>> +
>>
>> If the first "if (static_branch_likely(&rmid_read_locked))" was taken then the second
>> if branch _has_ to be taken. It should not be optional to release a lock if it was taken. I
>> think it would be more robust if a single test of the static key decides whether the
>> spinlock should be used.
>
> Is the concern that the branch value could change concurrently and
> result in a deadlock?
Possibly ... it may be that the static key cannot change value during
this call, but trusting that requires a deeper understanding of the
various flows in which this logic runs. I think this should be
explicit: if a lock is taken then releasing it should not be optional
at all.
> I'm curious as to whether this case is performance critical enough to
> justify using a static branch. It's clear that we should be using them
> in the context switch path, but I'm unsure about other places where
> they're used even though a plain flag in memory would do.
Alternatively, there could be, for example, a __rmid_read_lock() that
is called from the context switch path and always takes the spinlock.
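A rough, untested sketch of that shape (hypothetical name, and assuming
__rmid_read() itself then drops the static key and locking):

static int __rmid_read_lock(struct rdt_hw_domain *hw_dom, u32 rmid,
			    enum resctrl_event_id eventid, u64 *val)
{
	unsigned long flags;
	int ret;

	/* Callers that need QM_EVTSEL serialization always lock. */
	raw_spin_lock_irqsave(&hw_dom->evtsel_lock, flags);
	ret = __rmid_read(hw_dom, rmid, eventid, val);
	raw_spin_unlock_irqrestore(&hw_dom->evtsel_lock, flags);

	return ret;
}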
Reinette