2021-10-07 03:49:54

by Eric Dumazet

[permalink] [raw]
Subject: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

From: Eric Dumazet <[email protected]>

Using per-cpu storage for @x86_cpu_to_logical_apicid
is not optimal.

Broadcast IPI will need at least one cache line
per cpu to access this field.

__x2apic_send_IPI_mask() is using standard bitmask operators.

By converting x86_cpu_to_logical_apicid to an array,
we reduce the number of needed cache lines by a factor of 16,
because we find 16 values in one cache line. The CPU prefetcher
can kick in nicely.

Also move @cluster_masks to READ_MOSTLY section to avoid
possible false sharing.

Tested on a dual socket host with 256 cpus,
average cost for a full broadcast is now 11 usec instead of 33 usec.

Signed-off-by: Eric Dumazet <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
arch/x86/kernel/apic/x2apic_cluster.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index f4da9bb69a8859ff10824315388aeb49c2ccfad9..8ba23bfbc91c559e27fefdb8c9d401edd83022b1 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -15,9 +15,14 @@ struct cluster_mask {
struct cpumask mask;
};

-static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
+/* __x2apic_send_IPI_mask() possibly needs to read
+ * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
+ * Using per cpu variable would cost one cache line per cpu.
+ */
+static u32 x86_cpu_to_logical_apicid[NR_CPUS] __read_mostly;
+
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
-static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
+static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
@@ -27,7 +32,7 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)

static void x2apic_send_IPI(int cpu, int vector)
{
- u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+ u32 dest = x86_cpu_to_logical_apicid[cpu];

/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
@@ -58,11 +63,10 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)

dest = 0;
for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
- dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);
+ dest |= x86_cpu_to_logical_apicid[clustercpu];

if (!dest)
continue;
-
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
/* Remove cluster CPUs from tmpmask */
cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
@@ -94,7 +98,7 @@ static void x2apic_send_IPI_all(int vector)

static u32 x2apic_calc_apicid(unsigned int cpu)
{
- return per_cpu(x86_cpu_to_logical_apicid, cpu);
+ return x86_cpu_to_logical_apicid[cpu];
}

static void init_x2apic_ldr(void)
@@ -103,7 +107,7 @@ static void init_x2apic_ldr(void)
u32 cluster, apicid = apic_read(APIC_LDR);
unsigned int cpu;

- this_cpu_write(x86_cpu_to_logical_apicid, apicid);
+ x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;

if (cmsk)
goto update;
--
2.33.0.882.g93a45727a2-goog


2021-10-07 08:37:57

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Wed, Oct 06, 2021 at 08:17:56PM -0700, Eric Dumazet wrote:
> +/* __x2apic_send_IPI_mask() possibly needs to read
> + * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
> + * Using per cpu variable would cost one cache line per cpu.
> + */

Broken comment style..

> +static u32 x86_cpu_to_logical_apicid[NR_CPUS] __read_mostly;

NR_CPUS is really sad, could this at all be dynamically allocated? Say
in x2apic_cluster_probe() ?


Otherwise this looks very nice!

2021-10-07 14:10:08

by Eric Dumazet

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Thu, Oct 7, 2021 at 12:29 AM Peter Zijlstra <[email protected]> wrote:
>
> On Wed, Oct 06, 2021 at 08:17:56PM -0700, Eric Dumazet wrote:
> > +/* __x2apic_send_IPI_mask() possibly needs to read
> > + * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
> > + * Using per cpu variable would cost one cache line per cpu.
> > + */
>
> Broken comment style..

I was not sure and ran checkpatch.pl before submission, but sure.

>
> > +static u32 x86_cpu_to_logical_apicid[NR_CPUS] __read_mostly;
>
> NR_CPUS is really sad, could this at all be dynamically allocated? Say
> in x2apic_cluster_probe() ?

Good idea, I will try this.
Hopefully nr_cpu_ids is populated there?

>
>
> Otherwise this looks very nice!

Thanks !

2021-10-07 19:38:35

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Thu, Oct 07, 2021 at 07:04:09AM -0700, Eric Dumazet wrote:
> On Thu, Oct 7, 2021 at 12:29 AM Peter Zijlstra <[email protected]> wrote:
> >
> > On Wed, Oct 06, 2021 at 08:17:56PM -0700, Eric Dumazet wrote:
> > > +/* __x2apic_send_IPI_mask() possibly needs to read
> > > + * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
> > > + * Using per cpu variable would cost one cache line per cpu.
> > > + */
> >
> > Broken comment style..
>
> I was not sure and ran checkpatch.pl before submission, but sure.
>
> >
> > > +static u32 x86_cpu_to_logical_apicid[NR_CPUS] __read_mostly;
> >
> > NR_CPUS is really sad, could this at all be dynamically allocated? Say
> > in x2apic_cluster_probe() ?
>
> Good idea, I will try this.
> Hopefully nr_cpu_ids is populated there ?

Let's hope :-), I'm always terminally lost in early bringup. I figure it
should be painfully obvious if it goes wrong.

2021-10-07 19:39:19

by Eric Dumazet

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Thu, Oct 7, 2021 at 7:07 AM Peter Zijlstra <[email protected]> wrote:
>
> On Thu, Oct 07, 2021 at 07:04:09AM -0700, Eric Dumazet wrote:
> > Good idea, I will try this.
> > Hopefully nr_cpu_ids is populated there ?
>
> Lets hope :-), I'm always terminally lost in early bringup. I figure it
> should be painfully obvious if it goes wrong.

The answer seems to be yes on hosts I tested.

2021-10-07 22:34:11

by Eric Dumazet

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Thu, Oct 7, 2021 at 7:13 AM Eric Dumazet <[email protected]> wrote:
>
> On Thu, Oct 7, 2021 at 7:07 AM Peter Zijlstra <[email protected]> wrote:
> >
> > On Thu, Oct 07, 2021 at 07:04:09AM -0700, Eric Dumazet wrote:
> > > Good idea, I will try this.
> > > Hopefully nr_cpu_ids is populated there ?
> >
> > Lets hope :-), I'm always terminally lost in early bringup. I figure it
> > should be painfully obvious if it goes wrong.
>
> The answer seems to be yes on hosts I tested.

I have one more question about __x2apic_send_IPI_mask()

Would it make sense to disable hard irqs in __x2apic_send_IPI_mask()
only for CONFIG_CPUMASK_OFFSTACK=y builds ?

It seems wasteful to use tiny per-cpu variables and block hard irqs.

Quick and very dirty patch :

diff --git a/arch/x86/kernel/apic/x2apic_cluster.c
b/arch/x86/kernel/apic/x2apic_cluster.c
index e696e22d0531976f7cba72ed17443592eac72c13..c5076d40d4ea7bc9ffb06728531d91777a32cef4
100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -44,15 +44,18 @@ static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
unsigned int cpu, clustercpu;
- struct cpumask *tmpmsk;
+ cpumask_var_t tmpmsk;
+#ifdef CONFIG_CPUMASK_OFFSTACK
unsigned long flags;
+#endif
u32 dest;

/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
+#ifdef CONFIG_CPUMASK_OFFSTACK
local_irq_save(flags);
-
tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+#endif
cpumask_copy(tmpmsk, mask);
/* If IPI should not be sent to self, clear current CPU */
if (apic_dest != APIC_DEST_ALLINC)
@@ -74,7 +77,9 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
int vector, int apic_dest)
cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
}

+#ifdef CONFIG_CPUMASK_OFFSTACK
local_irq_restore(flags);
+#endif
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)

2021-10-12 12:49:46

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Thu, Oct 07, 2021 at 03:13:46PM -0700, Eric Dumazet wrote:
> It seems wasteful to use tiny per-cpu variables and block hard irqs.
>
> Quick and very dirty patch :
>
> diff --git a/arch/x86/kernel/apic/x2apic_cluster.c
> b/arch/x86/kernel/apic/x2apic_cluster.c
> index e696e22d0531976f7cba72ed17443592eac72c13..c5076d40d4ea7bc9ffb06728531d91777a32cef4
> 100644
> --- a/arch/x86/kernel/apic/x2apic_cluster.c
> +++ b/arch/x86/kernel/apic/x2apic_cluster.c
> @@ -44,15 +44,18 @@ static void
> __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
> {
> unsigned int cpu, clustercpu;
> - struct cpumask *tmpmsk;
> + cpumask_var_t tmpmsk;
> +#ifdef CONFIG_CPUMASK_OFFSTACK
> unsigned long flags;
> +#endif
> u32 dest;
>
> /* x2apic MSRs are special and need a special fence: */
> weak_wrmsr_fence();
> +#ifdef CONFIG_CPUMASK_OFFSTACK
> local_irq_save(flags);
> -
> tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
> +#endif
> cpumask_copy(tmpmsk, mask);
> /* If IPI should not be sent to self, clear current CPU */
> if (apic_dest != APIC_DEST_ALLINC)
> @@ -74,7 +77,9 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
> int vector, int apic_dest)
> cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
> }
>
> +#ifdef CONFIG_CPUMASK_OFFSTACK
> local_irq_restore(flags);
> +#endif
> }

I'm really conflicted about this. On the one hand, yes absolutely. On
the other hand, urgh, code ugly :-)


2021-10-13 18:04:43

by Eric Dumazet

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Tue, Oct 12, 2021 at 5:46 AM Peter Zijlstra <[email protected]> wrote:

> I'm really conflicted about this. On the one hand, yes absolutely. On
> the other hand, urgh, code ugly :-)

That was indeed some ugly hack.

I cooked this more generic patch instead, I am currently testing it.
(generic as in: we no longer disable hard irqs, regardless of any CONFIG option)

diff --git a/arch/x86/kernel/apic/x2apic_cluster.c
b/arch/x86/kernel/apic/x2apic_cluster.c
index e696e22d0531976f7cba72ed17443592eac72c13..7ad81467ce33349dee1ceaf0cefc8375d60213f6
100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -22,7 +22,10 @@ struct cluster_mask {
*/
static u32 *x86_cpu_to_logical_apicid __read_mostly;

-static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
+#define IPI_NEST_MAX 3
+static DEFINE_PER_CPU(cpumask_var_t, ipi_mask[IPI_NEST_MAX]);
+static DEFINE_PER_CPU(int, ipi_nest_level);
+
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

@@ -45,14 +48,18 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
int vector, int apic_dest)
{
unsigned int cpu, clustercpu;
struct cpumask *tmpmsk;
- unsigned long flags;
+ int nest_level;
u32 dest;

/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
- local_irq_save(flags);

- tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+ preempt_disable();
+ nest_level = this_cpu_inc_return(ipi_nest_level) - 1;
+ if (WARN_ON_ONCE(nest_level >= IPI_NEST_MAX))
+ goto end;
+
+ tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask[nest_level]);
cpumask_copy(tmpmsk, mask);
/* If IPI should not be sent to self, clear current CPU */
if (apic_dest != APIC_DEST_ALLINC)
@@ -74,7 +81,9 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
int vector, int apic_dest)
cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
}

- local_irq_restore(flags);
+end:
+ this_cpu_dec(ipi_nest_level);
+ preempt_enable();
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
@@ -153,20 +162,26 @@ static int alloc_clustermask(unsigned int cpu, int node)

static int x2apic_prepare_cpu(unsigned int cpu)
{
+ int i;
+
if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
return -ENOMEM;
- if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
- return -ENOMEM;
+ for (i = 0 ; i < IPI_NEST_MAX; i++) {
+ if (!zalloc_cpumask_var(&per_cpu(ipi_mask[i], cpu), GFP_KERNEL))
+ return -ENOMEM;
+ }
return 0;
}

static int x2apic_dead_cpu(unsigned int dead_cpu)
{
struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
+ int i;

if (cmsk)
cpumask_clear_cpu(dead_cpu, &cmsk->mask);
- free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
+ for (i = 0; i < IPI_NEST_MAX; i++)
+ free_cpumask_var(per_cpu(ipi_mask[i], dead_cpu));
return 0;
}

2021-10-16 05:38:23

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH] x86/apic: reduce cache line misses in __x2apic_send_IPI_mask()

On Wed, Oct 13, 2021 at 11:02:46AM -0700, Eric Dumazet wrote:
> On Tue, Oct 12, 2021 at 5:46 AM Peter Zijlstra <[email protected]> wrote:
>
> > I'm really conflicted about this. On the one hand, yes absolutely. On
> > the other hand, urgh, code ugly :-)
>
> That was indeed some ugly hack.
>
> I cooked this more generic patch instead, I am currently testing it.
> (generic as : we no longer disable hard irqs, regardless of some CONFIG option )
>
> diff --git a/arch/x86/kernel/apic/x2apic_cluster.c
> b/arch/x86/kernel/apic/x2apic_cluster.c
> index e696e22d0531976f7cba72ed17443592eac72c13..7ad81467ce33349dee1ceaf0cefc8375d60213f6
> 100644
> --- a/arch/x86/kernel/apic/x2apic_cluster.c
> +++ b/arch/x86/kernel/apic/x2apic_cluster.c
> @@ -22,7 +22,10 @@ struct cluster_mask {
> */
> static u32 *x86_cpu_to_logical_apicid __read_mostly;
>
> -static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

This might maybe do with a comment explaining where the 3 comes from.
Also see below.

> +#define IPI_NEST_MAX 3
> +static DEFINE_PER_CPU(cpumask_var_t, ipi_mask[IPI_NEST_MAX]);
> +static DEFINE_PER_CPU(int, ipi_nest_level);
> +
> static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
> static struct cluster_mask *cluster_hotplug_mask;
>
> @@ -45,14 +48,18 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
> int vector, int apic_dest)
> {
> unsigned int cpu, clustercpu;
> struct cpumask *tmpmsk;
> - unsigned long flags;
> + int nest_level;
> u32 dest;
>
> /* x2apic MSRs are special and need a special fence: */
> weak_wrmsr_fence();
> - local_irq_save(flags);
>
> - tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
> + preempt_disable();
> + nest_level = this_cpu_inc_return(ipi_nest_level) - 1;
> + if (WARN_ON_ONCE(nest_level >= IPI_NEST_MAX))
> + goto end;

So this matches the: task, softirq, irq nesting and realistically won't
trigger I suppose, but that WARN is not giving me warm and fuzzies, just
not sending the IPI is terrible behaviour if we ever do hit this.

I think I would prefer to trip x2apic_send_IPI_all() over sending too
few IPIs.

That *might* in some distant future kill some NOHZ_FULL userspace, but
at least it won't make the system grind to a halt as a missing IPI can.

Thomas, any opinions there?

> +
> + tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask[nest_level]);
> cpumask_copy(tmpmsk, mask);
> /* If IPI should not be sent to self, clear current CPU */
> if (apic_dest != APIC_DEST_ALLINC)
> @@ -74,7 +81,9 @@ __x2apic_send_IPI_mask(const struct cpumask *mask,
> int vector, int apic_dest)
> cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
> }
>
> - local_irq_restore(flags);
> +end:
> + this_cpu_dec(ipi_nest_level);
> + preempt_enable();
> }
>
> static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)