2022-07-23 08:40:57

by Yi Sun

[permalink] [raw]
Subject: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

Calculate the latency of instructions xsave and xrstor with new trace
points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.

The delta TSC can be calculated within a single trace event. Another
option considered was to have 2 separated trace events marking the
start and finish of the xsave/xrstor instructions. The delta TSC was
calculated from the 2 trace points in user space, but there was
significant overhead added by the trace function itself.

In internal testing, the single trace point option which is
implemented here proved to be more accurate.

Together with the latency, dump the RFBM(requested-feature bitmap)
and XINUSE to identify the type of xsave/xrstor.

Trace log looks like following:
x86_fpu_latency_xsave: x86/fpu: latency:100 RFBM:0x202e7 XINUSE:0x202
x86_fpu_latency_xrstor: x86/fpu: latency:99 RFBM:0x202e7 XINUSE:0x202

To enable it, CONFIG_X86_DEBUG_FPU and CONFIG_TRACEPOINTS are required.
The compiler can get rid of all the extra crust when either of the two
configs is disabled.

If both of the two configs are enabled, xsave/xrstor_tracing_enabled
would be reduced to a static check for tracing enabled. Thus, in the
fast path there would be only 2 additional static checks.

Since tracepoints can be enabled dynamically, while the code is
checking tracepoint_enabled(trace_event), the trace_event could be
concurrently enabled. Hence there is a probability of getting a single
noisy result 'trace_clock() - (-1)' at the moment the trace points
x86_fpu_latency_* are enabled. Leave the noise here instead of additional
conditions before calling the x86_fpu_latency_* because that makes the
latency more accurate and it's easy to filer the noise out by the
following consuming script.

Reviewed-by: Sohil Mehta <[email protected]>
Reviewed-by: Tony Luck <[email protected]>
Signed-off-by: Yi Sun <[email protected]>

diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 4645a6334063..b40328f4e90d 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -89,6 +89,41 @@ DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed,
TP_ARGS(fpu)
);

+DECLARE_EVENT_CLASS(x86_fpu_latency,
+ TP_PROTO(struct fpstate *fpstate, u64 dtsc),
+ TP_ARGS(fpstate, dtsc),
+
+ TP_STRUCT__entry(
+ __field(struct fpstate *, fpstate)
+ __field(u64, dtsc)
+ __field(u64, rfbm)
+ __field(u64, xinuse)
+ ),
+
+ TP_fast_assign(
+ __entry->fpstate = fpstate;
+ __entry->dtsc = dtsc;
+ __entry->rfbm = fpstate->xfeatures;
+ __entry->xinuse = fpstate->regs.xsave.header.xfeatures;
+ ),
+
+ TP_printk("x86/fpu: latency:%lld RFBM:0x%llx XINUSE:0x%llx",
+ __entry->dtsc,
+ __entry->rfbm,
+ __entry->xinuse
+ )
+);
+
+DEFINE_EVENT(x86_fpu_latency, x86_fpu_latency_xsave,
+ TP_PROTO(struct fpstate *fpstate, u64 dtsc),
+ TP_ARGS(fpstate, dtsc)
+);
+
+DEFINE_EVENT(x86_fpu_latency, x86_fpu_latency_xrstor,
+ TP_PROTO(struct fpstate *fpstate, u64 dtsc),
+ TP_ARGS(fpstate, dtsc)
+);
+
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH asm/trace/
#undef TRACE_INCLUDE_FILE
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index d22ace092ca2..c5d294e5ee52 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -5,6 +5,9 @@
#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
+#include <asm/trace/fpu.h>
+
+#include <linux/trace_clock.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
@@ -68,6 +71,20 @@ static inline u64 xfeatures_mask_independent(void)
return XFEATURE_MASK_INDEPENDENT;
}

+static inline bool xsave_tracing_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_X86_DEBUG_FPU))
+ return false;
+ return tracepoint_enabled(x86_fpu_latency_xsave);
+}
+
+static inline bool xrstor_tracing_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_X86_DEBUG_FPU))
+ return false;
+ return tracepoint_enabled(x86_fpu_latency_xrstor);
+}
+
/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
@@ -110,7 +127,7 @@ static inline u64 xfeatures_mask_independent(void)
* original instruction which gets replaced. We need to use it here as the
* address of the instruction where we might get an exception at.
*/
-#define XSTATE_XSAVE(st, lmask, hmask, err) \
+#define __XSTATE_XSAVE(st, lmask, hmask, err) \
asm volatile(ALTERNATIVE_2(XSAVE, \
XSAVEOPT, X86_FEATURE_XSAVEOPT, \
XSAVES, X86_FEATURE_XSAVES) \
@@ -122,11 +139,22 @@ static inline u64 xfeatures_mask_independent(void)
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")

+#define XSTATE_XSAVE(fps, lmask, hmask, err) \
+ do { \
+ struct fpstate *f = fps; \
+ u64 tc = -1; \
+ if (xsave_tracing_enabled()) \
+ tc = trace_clock(); \
+ __XSTATE_XSAVE(&f->regs.xsave, lmask, hmask, err); \
+ if (xsave_tracing_enabled()) \
+ trace_x86_fpu_latency_xsave(f, trace_clock() - tc);\
+ } while (0)
+
/*
* Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
* XSAVE area format.
*/
-#define XSTATE_XRESTORE(st, lmask, hmask) \
+#define __XSTATE_XRESTORE(st, lmask, hmask) \
asm volatile(ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
@@ -136,6 +164,17 @@ static inline u64 xfeatures_mask_independent(void)
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")

+#define XSTATE_XRESTORE(fps, lmask, hmask) \
+ do { \
+ struct fpstate *f = fps; \
+ u64 tc = -1; \
+ if (xrstor_tracing_enabled()) \
+ tc = trace_clock(); \
+ __XSTATE_XRESTORE(&f->regs.xsave, lmask, hmask); \
+ if (xrstor_tracing_enabled()) \
+ trace_x86_fpu_latency_xrstor(f, trace_clock() - tc);\
+ } while (0)
+
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
@@ -180,7 +219,7 @@ static inline void os_xsave(struct fpstate *fpstate)
WARN_ON_FPU(!alternatives_patched);
xfd_validate_state(fpstate, mask, false);

- XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);
+ XSTATE_XSAVE(fpstate, lmask, hmask, err);

/* We should never fault when copying to a kernel buffer: */
WARN_ON_FPU(err);
@@ -197,7 +236,7 @@ static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
u32 hmask = mask >> 32;

xfd_validate_state(fpstate, mask, true);
- XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
+ XSTATE_XRESTORE(fpstate, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
@@ -207,7 +246,7 @@ static inline void os_xrstor_supervisor(struct fpstate *fpstate)
u32 lmask = mask;
u32 hmask = mask >> 32;

- XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
+ XSTATE_XRESTORE(fpstate, lmask, hmask);
}

/*
--
2.34.1


2022-07-24 21:25:31

by David Laight

[permalink] [raw]
Subject: RE: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

From: Yi Sun
> Sent: 23 July 2022 09:38
>
> Calculate the latency of instructions xsave and xrstor with new trace
> points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.
>
> The delta TSC can be calculated within a single trace event. Another
> option considered was to have 2 separated trace events marking the
> start and finish of the xsave/xrstor instructions. The delta TSC was
> calculated from the 2 trace points in user space, but there was
> significant overhead added by the trace function itself.
>
> In internal testing, the single trace point option which is
> implemented here proved to be more accurate.
...

I've done some experiments that measure short instruction latencies.
Basically I found:
1) You need a suitable serialising instruction before and after
the code being tested - otherwise it can overlap whatever
you are using for timing.
2) The only reliable counter is the performance monitor clock
counter - everything else depends on the current cpu frequency.
On intel cpu the cpu frequency can change all the time.
Allowing for that, and then ignoring complete outliers, I could
get clock-count accurate values for iterations of the IP csum loop.

David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)

2022-07-24 23:11:55

by Luck, Tony

[permalink] [raw]
Subject: Re: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

RDTSC has returned values invariant of current frequency since Nehalem (modulo a few hiccoughs). So any CPU with XSAVE/XRESTOR should be safe to measure using TSC.

Sent from my iPhone

> On Jul 24, 2022, at 14:16, David Laight <[email protected]> wrote:
>
> From: Yi Sun
>> Sent: 23 July 2022 09:38
>>
>> Calculate the latency of instructions xsave and xrstor with new trace
>> points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.
>>
>> The delta TSC can be calculated within a single trace event. Another
>> option considered was to have 2 separated trace events marking the
>> start and finish of the xsave/xrstor instructions. The delta TSC was
>> calculated from the 2 trace points in user space, but there was
>> significant overhead added by the trace function itself.
>>
>> In internal testing, the single trace point option which is
>> implemented here proved to be more accurate.
> ...
>
> I've done some experiments that measure short instruction latencies.
> Basically I found:
> 1) You need a suitable serialising instruction before and after
> the code being tested - otherwise it can overlap whatever
> you are using for timing.
> 2) The only reliable counter is the performance monitor clock
> counter - everything else depends on the current cpu frequency.
> On intel cpu the cpu frequency can change all the time.
> Allowing for that, and then ignoring complete outliers, I could
> get clock-count accurate values for iterations of the IP csum loop.
>
> David
>
> -
> Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
> Registration No: 1397386 (Wales)
>

2022-07-25 07:55:37

by David Laight

[permalink] [raw]
Subject: RE: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

From: Luck, Tony
> Sent: 25 July 2022 00:00
>
> RDTSC has returned values invariant of current frequency since Nehalem (modulo a few hiccoughs). So
> any CPU with XSAVE/XRESTOR should be safe to measure using TSC.

Indeed - that is exactly why you can't use the TSC to measure
instruction latency any more.
You need to measure latency in clocks, not time.

On cpu where all the cores run at the same frequency you can
see the effect by spinning one core in userspace.
Running 'while :; do :; done' from a shell prompt is pretty
effective at spinning in userspace.

David

>
> Sent from my iPhone
>
> > On Jul 24, 2022, at 14:16, David Laight <[email protected]> wrote:
> >
> > From: Yi Sun
> >> Sent: 23 July 2022 09:38
> >>
> >> Calculate the latency of instructions xsave and xrstor with new trace
> >> points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.
> >>
> >> The delta TSC can be calculated within a single trace event. Another
> >> option considered was to have 2 separated trace events marking the
> >> start and finish of the xsave/xrstor instructions. The delta TSC was
> >> calculated from the 2 trace points in user space, but there was
> >> significant overhead added by the trace function itself.
> >>
> >> In internal testing, the single trace point option which is
> >> implemented here proved to be more accurate.
> > ...
> >
> > I've done some experiments that measure short instruction latencies.
> > Basically I found:
> > 1) You need a suitable serialising instruction before and after
> > the code being tested - otherwise it can overlap whatever
> > you are using for timing.
> > 2) The only reliable counter is the performance monitor clock
> > counter - everything else depends on the current cpu frequency.
> > On intel cpu the cpu frequency can change all the time.
> > Allowing for that, and then ignoring complete outliers, I could
> > get clock-count accurate values for iterations of the IP csum loop.
> >
> > David
> >
> > -
> > Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
> > Registration No: 1397386 (Wales)
> >

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)

2022-07-25 18:09:53

by Dave Hansen

[permalink] [raw]
Subject: Re: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

On 7/24/22 13:54, David Laight wrote:
> I've done some experiments that measure short instruction latencies.
> Basically I found:

Short? The instructions in question can write up to about 12k of data.
That's not "short" by any means.

I'm also not sure precision here is all that important. The main things
we want to know here when and where the init and modified optimizations
are coming into play. In other words, how often is there actual data
that *needs* to be saved and restored and can't be optimized away.

So, sure, if we were measuring a dozen cycles here, you could make an
argument that this _might_ be problematic.

But, in this case, we really just want to be able to tell when
XSAVE/XRSTOR are getting more or less expensive and also get out a
minimal amount of data (RFBM/XINUSE) to make a guess why that might be.

Is it *REALLY* worth throwing serializing instructions in and moving
clock sources to do that? Is the added precision worth it?

2022-07-26 07:47:34

by David Laight

[permalink] [raw]
Subject: RE: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

From: Dave Hansen
> Sent: 25 July 2022 18:44
>
> On 7/24/22 13:54, David Laight wrote:
> > I've done some experiments that measure short instruction latencies.
> > Basically I found:
>
> Short? The instructions in question can write up to about 12k of data.
> That's not "short" by any means.
>
> I'm also not sure precision here is all that important. The main things
> we want to know here when and where the init and modified optimizations
> are coming into play. In other words, how often is there actual data
> that *needs* to be saved and restored and can't be optimized away.
>
> So, sure, if we were measuring a dozen cycles here, you could make an
> argument that this _might_ be problematic.
>
> But, in this case, we really just want to be able to tell when
> XSAVE/XRSTOR are getting more or less expensive and also get out a
> minimal amount of data (RFBM/XINUSE) to make a guess why that might be.
>
> Is it *REALLY* worth throwing serializing instructions in and moving
> clock sources to do that? Is the added precision worth it?

I suspect that if you run your test in a loop the cpu will
be running at 800MHz for the first iteration but will soon
be running at 3GHz.
That is a 4-fold change in execution time if you use the TSC.

IIRC RDTSC is a serialising instruction (of some form), XSAVE
and XRSTOR may also be - so you probably are waiting for the
instruction to finish.
But you do actually need to ensure that is happening.

David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)

2022-07-26 09:22:09

by Yi Sun

[permalink] [raw]
Subject: Re: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

On 24.07.2022 20:54, David Laight wrote:
>From: Yi Sun
>> Sent: 23 July 2022 09:38
>>
>> Calculate the latency of instructions xsave and xrstor with new trace
>> points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.
>>
>> The delta TSC can be calculated within a single trace event. Another
>> option considered was to have 2 separated trace events marking the
>> start and finish of the xsave/xrstor instructions. The delta TSC was
>> calculated from the 2 trace points in user space, but there was
>> significant overhead added by the trace function itself.
>>
>> In internal testing, the single trace point option which is
>> implemented here proved to be more accurate.
>...
>
>I've done some experiments that measure short instruction latencies.
>Basically I found:
>1) You need a suitable serialising instruction before and after
> the code being tested - otherwise it can overlap whatever
> you are using for timing.

The original commit message here is admittedly not precise. The patch
actually makes use of trace_clock() to calculate the latency, rather than
reading the TSC directly. The precision here is "at most ~1 jiffy between
CPUs", which is probably acceptable for this usage.
I would like to refine the commit message if it is confusing.

Thanks
--Sun, Yi

2022-07-28 19:07:28

by Sohil Mehta

[permalink] [raw]
Subject: Re: [PATCH 1/2] x86/fpu: Measure the Latency of XSAVE and XRSTOR

Some nits below:

On 7/23/2022 1:37 AM, Yi Sun wrote:
> Calculate the latency of instructions xsave and xrstor with new trace
> points x86_fpu_latency_xsave and x86_fpu_latency_xrstor.
>
> The delta TSC can be calculated within a single trace event.

s/can be/is

Another
> option considered was to have 2 separated trace events marking the
> start and finish of the xsave/xrstor instructions. The delta TSC was
> calculated from the 2 trace points in user space, but there was
> significant overhead added by the trace function itself.
>

...

>
> To enable it, CONFIG_X86_DEBUG_FPU and CONFIG_TRACEPOINTS are required.
> The compiler can get rid of all the extra crust when either of the two

s/can/will


> configs is disabled.
>

> If both of the two configs are enabled, xsave/xrstor_tracing_enabled

s/two/

> would be reduced to a static check for tracing enabled. Thus, in the
> fast path there would be only 2 additional static checks.

...

> Leave the noise here instead of additional
> conditions before calling the x86_fpu_latency_* because that makes the
> latency more accurate and it's easy to filer the noise out by the

s/filer/filter

> following consuming script.

...

>
> +DECLARE_EVENT_CLASS(x86_fpu_latency,
> + TP_PROTO(struct fpstate *fpstate, u64 dtsc),

On x86, though TSC is the underlying counter for trace_clock(), it might
be useful to keep this generic.

s/dtsc/latency

"latency" would match the printk below as well.

> + TP_ARGS(fpstate, dtsc),
> +
> + TP_STRUCT__entry(
> + __field(struct fpstate *, fpstate)
> + __field(u64, dtsc)
> + __field(u64, rfbm)
> + __field(u64, xinuse)
> + ),
> +
> + TP_fast_assign(
> + __entry->fpstate = fpstate;
> + __entry->dtsc = dtsc;
> + __entry->rfbm = fpstate->xfeatures;
> + __entry->xinuse = fpstate->regs.xsave.header.xfeatures;
> + ),
> +
> + TP_printk("x86/fpu: latency:%lld RFBM:0x%llx XINUSE:0x%llx",
> + __entry->dtsc,
> + __entry->rfbm,
> + __entry->xinuse
> + )
> +);
> +

-Sohil