2023-04-21 18:55:27

by Liang, Kan

[permalink] [raw]
Subject: [PATCH V4 1/2] perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable

From: Kan Liang <[email protected]>

Several similar kernel warnings can be triggered,

[56605.607840] CPU0 PEBS record size 0, expected 32, config 0
cpuc->record_size=208

when the below commands are running in parallel for a while on SPR.

while true; do perf record --no-buildid -a --intr-regs=AX -e
cpu/event=0xd0,umask=0x81/pp -c 10003 -o /dev/null ./triad; done &

while true; do perf record -o /tmp/out -W -d -e
'{ld_blocks.store_forward:period=1000000,
MEM_TRANS_RETIRED.LOAD_LATENCY:u:precise=2:ldlat=4}'
-c 1037 ./triad; done
*The triad program is just the generation of loads/stores.

The warnings are triggered when an unexpected PEBS record (with a
different config and size) is found.

A system-wide PEBS event with the large PEBS config may be enabled
during a context switch. Some PEBS records for the system-wide PEBS may
be generated while the old task is sched out but the new one hasn't been
sched in yet. When the new task is sched in, the cpuc->pebs_record_size
may be updated for the per-task PEBS events. So the existing system-wide
PEBS records have a different size from the later PEBS records.

The PEBS buffer should be flushed right before the hardware is
reprogrammed. The new size and threshold should be updated after the old
buffer has been flushed.

Reported-by: Stephane Eranian <[email protected]>
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
---

Changes since V3:
- update comments

arch/x86/events/intel/ds.c | 41 +++++++++++++++++++++++++++-----------
1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a2e566e53076..94043232991c 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1252,22 +1252,26 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;

- /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
- if (cpuc->n_pebs == 1) {
+ /* Clear pebs_data_cfg for first PEBS. */
+ if (cpuc->n_pebs == 1)
cpuc->pebs_data_cfg = 0;
- cpuc->pebs_record_size = sizeof(struct pebs_basic);
- }

pebs_data_cfg = pebs_update_adaptive_cfg(event);

- /* Update pebs_record_size if new event requires more data. */
- if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
+ /*
+ * Only update the pebs_data_cfg here. The pebs_record_size
+ * will be updated later when the new pebs_data_cfg takes effect.
+ */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
cpuc->pebs_data_cfg |= pebs_data_cfg;
- adaptive_pebs_record_size_update();
- update = true;
- }
}

+ /*
+ * For the adaptive PEBS, the threshold will be updated later
+ * when the new pebs_data_cfg takes effect.
+ * The threshold may not be accurate before that, but that
+ * does not hurt.
+ */
if (update)
pebs_update_threshold(cpuc);
}
@@ -1326,6 +1330,13 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}

+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
+ intel_pmu_drain_pebs_buffer();
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1345,6 +1356,14 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ /*
+ * drain_pebs() assumes uniform record size;
+ * hence we need to drain when changing said
+ * size.
+ */
+ intel_pmu_drain_large_pebs(cpuc);
+ adaptive_pebs_record_size_update();
+ pebs_update_threshold(cpuc);
wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
}
@@ -1391,9 +1410,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

- if (cpuc->n_pebs == cpuc->n_large_pebs &&
- cpuc->n_pebs != cpuc->n_pebs_via_pt)
- intel_pmu_drain_pebs_buffer();
+ intel_pmu_drain_large_pebs(cpuc);

cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

--
2.35.1


2023-04-21 18:56:12

by Liang, Kan

[permalink] [raw]
Subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

From: Kan Liang <[email protected]>

The update of the pebs_record_size has been delayed to the place right
before the new pebs_data_cfg takes effect for the adaptive PEBS. But the
update of the DS threshold is still in the event_add stage. The
threshold is calculated from the pebs_record_size. So it may contain
inaccurate data. The data will be corrected in the event_enable stage.
So there is no real harm. But the logic is quite a mess and hard to
follow.

Move the threshold update to the event_enable stage where all the
configurations have been settled down.

Steal the highest bit of cpuc->pebs_data_cfg to track whether the
threshold update is required. Just need to update the threshold once.

It's possible that the first event is eligible for the large PEBS,
while the second event is not. The current perf implementation may
update the threshold twice in the event_add stage. This patch could
also improve such cases by avoiding the extra update.

No functional change.

Signed-off-by: Kan Liang <[email protected]>
---

This is a cleanup patch to address the comment.
https://lore.kernel.org/lkml/[email protected]/
It doesn't fix any real issues. It just tries to make the logic clear and
consistent.

arch/x86/events/intel/ds.c | 34 ++++++++++++-------------------
arch/x86/include/asm/perf_event.h | 8 ++++++++
2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 94043232991c..554a58318787 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
+
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
- bool update = cpuc->n_pebs == 1;
+ if (cpuc->n_pebs == 1)
+ cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;

if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);

- update = true;
+ cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}

/*
@@ -1252,28 +1254,15 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;

- /* Clear pebs_data_cfg for first PEBS. */
- if (cpuc->n_pebs == 1)
- cpuc->pebs_data_cfg = 0;
-
pebs_data_cfg = pebs_update_adaptive_cfg(event);

/*
* Only update the pebs_data_cfg here. The pebs_record_size
* will be updated later when the new pebs_data_cfg takes effect.
*/
- if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
- cpuc->pebs_data_cfg |= pebs_data_cfg;
+ if (pebs_data_cfg & ~get_pebs_datacfg_hw(cpuc->pebs_data_cfg))
+ cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
-
- /*
- * For the adaptive PEBS, the threshold will be updated later
- * when the new pebs_data_cfg takes effect.
- * The threshold may not be accurate before that, but that
- * does not hurt.
- */
- if (update)
- pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
@@ -1355,7 +1344,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)

if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
- if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ if (get_pebs_datacfg_hw(cpuc->pebs_data_cfg) != cpuc->active_pebs_data_cfg) {
/*
* drain_pebs() assumes uniform record size;
* hence we need to drain when changing said
@@ -1363,11 +1352,14 @@ void intel_pmu_pebs_enable(struct perf_event *event)
*/
intel_pmu_drain_large_pebs(cpuc);
adaptive_pebs_record_size_update();
- pebs_update_threshold(cpuc);
- wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
- cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ wrmsrl(MSR_PEBS_DATA_CFG, get_pebs_datacfg_hw(cpuc->pebs_data_cfg));
+ cpuc->active_pebs_data_cfg = get_pebs_datacfg_hw(cpuc->pebs_data_cfg);
}
}
+ if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
+ cpuc->pebs_data_cfg &= ~PEBS_UPDATE_DS_SW;
+ pebs_update_threshold(cpuc);
+ }

if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc15ed5e60b..259a2a8afe2b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -121,6 +121,14 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24

+/* Steal the highest bit of pebs_data_cfg for SW usage */
+#define PEBS_UPDATE_DS_SW BIT_ULL(63)
+
+static inline u64 get_pebs_datacfg_hw(u64 config)
+{
+ return config & ~PEBS_UPDATE_DS_SW;
+}
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
--
2.35.1

2023-04-25 07:27:44

by Yujie Liu

[permalink] [raw]
Subject: Re: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

Hello,

kernel test robot noticed "Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler" on:

commit: a17c97370d1fb9b2eac75c85136a1f70ec44eded ("[PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update")
url: https://github.com/intel-lab-lkp/linux/commits/kan-liang-linux-intel-com/perf-x86-intel-ds-Delay-the-threshold-update/20230422-024743
base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git 15def34e2635ab7e0e96f1bc32e1b69609f14942
patch link: https://lore.kernel.org/all/[email protected]/
patch subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

in testcase: kvm-unit-tests-qemu

compiler: gcc-11
test machine: 224 threads 2 sockets Intel(R) Xeon(R) Platinum 8480+ (Sapphire Rapids) with 256G memory

(please refer to attached dmesg/kmsg for entire log/backtrace)


If you fix the issue, kindly add following tag
| Reported-by: kernel test robot <[email protected]>
| Link: https://lore.kernel.org/oe-lkp/[email protected]


[ 179.997768][ T2324] 2023-04-23 15:30:10 ./run_tests.sh
...
[ 180.442663][T20263] x86/split lock detection: #AC: qemu-system-x86/20263 took a split_lock trap at address: 0x1e3
[ 182.372875][ T2324]
[ 182.845057][T20473] x86/split lock detection: #AC: qemu-system-x86/20473 took a split_lock trap at address: 0x1e3
[ 184.109963][ T2324]
[ 184.594015][T20686] x86/split lock detection: #AC: qemu-system-x86/20686 took a split_lock trap at address: 0x1e3
[ 186.113512][ T2324]
[ 186.595753][T20897] x86/split lock detection: #AC: qemu-system-x86/20897 took a split_lock trap at address: 0x1e3
[ 188.771195][ T2324]
[ 189.257277][T21112] x86/split lock detection: #AC: qemu-system-x86/21112 took a split_lock trap at address: 0x1e3
[ 190.649532][ T2324]
[ 191.142833][T21320] x86/split lock detection: #AC: qemu-system-x86/21320 took a split_lock trap at address: 0x1e3
[ 204.181302][ T2324]
[ 204.192731][ T2324]
[ 204.677795][T21577] x86/split lock detection: #AC: qemu-system-x86/21577 took a split_lock trap at address: 0x1e3
[ 208.521357][ T2324]
[ 209.002903][T21799] x86/split lock detection: #AC: qemu-system-x86/21799 took a split_lock trap at address: 0x1e3
[ 209.092837][ T2324]
[ 209.565251][T22006] x86/split lock detection: #AC: qemu-system-x86/22006 took a split_lock trap at address: 0x1e3
[ 209.654034][ T2324]
[ 210.133408][T22210] x86/split lock detection: #AC: qemu-system-x86/22210 took a split_lock trap at address: 0x1e3
[ 210.200824][ T2324]
[ 210.668239][T22414] x86/split lock detection: #AC: qemu-system-x86/22414 took a split_lock trap at address: 0x1e3
[ 211.617069][ T2324]
[ 212.098716][T22618] x86/split lock detection: #AC: qemu-system-x86/22618 took a split_lock trap at address: 0x1e3
[ 212.139734][T22618] kvm: emulating exchange as write
[ 212.236036][ T2324]
[ 212.721375][T22825] x86/split lock detection: #AC: qemu-system-x86/22825 took a split_lock trap at address: 0x1e3
[ 212.815919][ T2324]
[ 213.308442][T23032] x86/split lock detection: #AC: qemu-system-x86/23032 took a split_lock trap at address: 0x1e3
[ 213.390697][ T2324]
[ 213.864128][T23236] x86/split lock detection: #AC: qemu-system-x86/23236 took a split_lock trap at address: 0x1e3
[ 213.937439][ T2324]
[ 214.410905][T23443] x86/split lock detection: #AC: qemu-system-x86/23443 took a split_lock trap at address: 0x1e3
[ 214.491760][ T2324]
[ 214.958649][T23647] x86/split lock detection: #AC: qemu-system-x86/23647 took a split_lock trap at address: 0x1e3
[ 216.032131][ T2324]
[ 216.520146][T23858] x86/split lock detection: #AC: qemu-system-x86/23858 took a split_lock trap at address: 0x1e3
[ 218.172563][ T2324]
[ 218.664754][T24072] x86/split lock detection: #AC: qemu-system-x86/24072 took a split_lock trap at address: 0x1e3
[ 218.739564][ T2324]
[ 219.241807][T24274] x86/split lock detection: #AC: qemu-system-x86/24274 took a split_lock trap at address: 0x1e3
[ 224.064675][ C84] mce: CPUs not responding to MCE broadcast (may include false positives): 0-83,85-223
[ 224.064681][ C84] Kernel panic - not syncing: Timeout: Not all CPUs entered broadcast exception handler
[ 225.089881][ C84] Shutting down cpus with NMI
[ 225.129381][ C84] Kernel Offset: disabled
[ 234.178388][ C84] pstore: backend (erst) writing error (-28)
input_data: 0x000000407e8bd3a8
input_len: 0x0000000001535d82
output: 0x000000407ac00000
output_len: 0x00000000050619f8
kernel_total_size: 0x0000000005228000
needed_size: 0x0000000005400000
trampoline_32bit: 0x000000000009d000


--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests


Attachments:
(No filename) (4.64 kB)
config-6.3.0-rc3-00006-ga17c97370d1f (159.21 kB)
job-script (5.74 kB)
dmesg.xz (89.49 kB)
kvm-unit-tests-qemu (511.07 kB)
job.yaml (4.90 kB)
Download all attachments

2023-04-25 11:34:09

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

On Tue, Apr 25, 2023 at 03:16:29PM +0800, kernel test robot wrote:
> Hello,
>
> kernel test robot noticed "Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler" on:
>
> commit: a17c97370d1fb9b2eac75c85136a1f70ec44eded ("[PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update")
> url: https://github.com/intel-lab-lkp/linux/commits/kan-liang-linux-intel-com/perf-x86-intel-ds-Delay-the-threshold-update/20230422-024743
> base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git 15def34e2635ab7e0e96f1bc32e1b69609f14942
> patch link: https://lore.kernel.org/all/[email protected]/
> patch subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update
>

> [ 224.064675][ C84] mce: CPUs not responding to MCE broadcast (may include false positives): 0-83,85-223
> [ 224.064681][ C84] Kernel panic - not syncing: Timeout: Not all CPUs entered broadcast exception handler
> [ 225.089881][ C84] Shutting down cpus with NMI
> [ 225.129381][ C84] Kernel Offset: disabled

That seems very unrelated to the patch at hand; was this bisect double
checked?

2023-04-26 13:21:32

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH V4 1/2] perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable

On Fri, Apr 21, 2023 at 11:45:28AM -0700, [email protected] wrote:
> From: Kan Liang <[email protected]>
>
> Several similar kernel warnings can be triggered,
>
> [56605.607840] CPU0 PEBS record size 0, expected 32, config 0
> cpuc->record_size=208
>
> when the below commands are running in parallel for a while on SPR.
>
> while true; do perf record --no-buildid -a --intr-regs=AX -e
> cpu/event=0xd0,umask=0x81/pp -c 10003 -o /dev/null ./triad; done &
>
> while true; do perf record -o /tmp/out -W -d -e
> '{ld_blocks.store_forward:period=1000000,
> MEM_TRANS_RETIRED.LOAD_LATENCY:u:precise=2:ldlat=4}'
> -c 1037 ./triad; done
> *The triad program is just the generation of loads/stores.
>
> The warnings are triggered when an unexpected PEBS record (with a
> different config and size) is found.
>
> A system-wide PEBS event with the large PEBS config may be enabled
> during a context switch. Some PEBS records for the system-wide PEBS may
> be generated while the old task is sched out but the new one hasn't been
> sched in yet. When the new task is sched in, the cpuc->pebs_record_size
> may be updated for the per-task PEBS events. So the existing system-wide
> PEBS records have a different size from the later PEBS records.
>
> The PEBS buffer should be flushed right before the hardware is
> reprogrammed. The new size and threshold should be updated after the old
> buffer has been flushed.
>
> Reported-by: Stephane Eranian <[email protected]>
> Suggested-by: Peter Zijlstra (Intel) <[email protected]>
> Signed-off-by: Kan Liang <[email protected]>
> ---

So I find it much easier to read the whole thing when collapsed.
Something like the below; that ok with you?

---
arch/x86/events/intel/ds.c | 56 ++++++++++++++++++++++-----------------
arch/x86/include/asm/perf_event.h | 3 +++
2 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a2e566e53076..df88576d6b2a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
+
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
- bool update = cpuc->n_pebs == 1;
+ if (cpuc->n_pebs == 1)
+ cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;

if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);

- update = true;
+ cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}

/*
@@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;

- /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
- if (cpuc->n_pebs == 1) {
- cpuc->pebs_data_cfg = 0;
- cpuc->pebs_record_size = sizeof(struct pebs_basic);
- }
-
pebs_data_cfg = pebs_update_adaptive_cfg(event);
-
- /* Update pebs_record_size if new event requires more data. */
- if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
- cpuc->pebs_data_cfg |= pebs_data_cfg;
- adaptive_pebs_record_size_update();
- update = true;
- }
+ /*
+ * Be sure to update the thresholds when we change the record.
+ */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
+ cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
-
- if (update)
- pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
@@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}

+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
+ intel_pmu_drain_pebs_buffer();
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
unsigned int idx = hwc->idx;
@@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)

if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
- if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
- wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
- cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ /*
+ * drain_pebs() assumes uniform record size;
+ * hence we need to drain when changing said
+ * size.
+ */
+ intel_pmu_drain_large_pebs(cpuc);
+ adaptive_pebs_record_size_update();
+ wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+ cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
+ if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
+ cpuc->pebs_data_cfg = pebs_data_cfg;
+ pebs_update_threshold(cpuc);
+ }

if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
@@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

- if (cpuc->n_pebs == cpuc->n_large_pebs &&
- cpuc->n_pebs != cpuc->n_pebs_via_pt)
- intel_pmu_drain_pebs_buffer();
+ intel_pmu_drain_large_pebs(cpuc);

cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc15ed5e60b..abf09882f58b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -121,6 +121,9 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24

+/* Steal the highest bit of pebs_data_cfg for SW usage */
+#define PEBS_UPDATE_DS_SW BIT_ULL(63)
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:

2023-04-26 14:35:42

by Liang, Kan

[permalink] [raw]
Subject: Re: [PATCH V4 1/2] perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable



On 2023-04-26 9:18 a.m., Peter Zijlstra wrote:
> On Fri, Apr 21, 2023 at 11:45:28AM -0700, [email protected] wrote:
>> From: Kan Liang <[email protected]>
>>
>> Several similar kernel warnings can be triggered,
>>
>> [56605.607840] CPU0 PEBS record size 0, expected 32, config 0
>> cpuc->record_size=208
>>
>> when the below commands are running in parallel for a while on SPR.
>>
>> while true; do perf record --no-buildid -a --intr-regs=AX -e
>> cpu/event=0xd0,umask=0x81/pp -c 10003 -o /dev/null ./triad; done &
>>
>> while true; do perf record -o /tmp/out -W -d -e
>> '{ld_blocks.store_forward:period=1000000,
>> MEM_TRANS_RETIRED.LOAD_LATENCY:u:precise=2:ldlat=4}'
>> -c 1037 ./triad; done
>> *The triad program is just the generation of loads/stores.
>>
>> The warnings are triggered when an unexpected PEBS record (with a
>> different config and size) is found.
>>
>> A system-wide PEBS event with the large PEBS config may be enabled
>> during a context switch. Some PEBS records for the system-wide PEBS may
>> be generated while the old task is sched out but the new one hasn't been
>> sched in yet. When the new task is sched in, the cpuc->pebs_record_size
>> may be updated for the per-task PEBS events. So the existing system-wide
>> PEBS records have a different size from the later PEBS records.
>>
>> The PEBS buffer should be flushed right before the hardware is
>> reprogrammed. The new size and threshold should be updated after the old
>> buffer has been flushed.
>>
>> Reported-by: Stephane Eranian <[email protected]>
>> Suggested-by: Peter Zijlstra (Intel) <[email protected]>
>> Signed-off-by: Kan Liang <[email protected]>
>> ---
>
> So I find it much easier to read the whole thing when collapsed.
> Something like the below; that ok with you?

Yes, the changes look good to me.

Thanks,
Kan
>
> ---
> arch/x86/events/intel/ds.c | 56 ++++++++++++++++++++++-----------------
> arch/x86/include/asm/perf_event.h | 3 +++
> 2 files changed, 35 insertions(+), 24 deletions(-)
>
> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
> index a2e566e53076..df88576d6b2a 100644
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
> struct perf_event *event, bool add)
> {
> struct pmu *pmu = event->pmu;
> +
> /*
> * Make sure we get updated with the first PEBS
> * event. It will trigger also during removal, but
> * that does not hurt:
> */
> - bool update = cpuc->n_pebs == 1;
> + if (cpuc->n_pebs == 1)
> + cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
>
> if (needed_cb != pebs_needs_sched_cb(cpuc)) {
> if (!needed_cb)
> @@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
> else
> perf_sched_cb_dec(pmu);
>
> - update = true;
> + cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
> }
>
> /*
> @@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
> if (x86_pmu.intel_cap.pebs_baseline && add) {
> u64 pebs_data_cfg;
>
> - /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
> - if (cpuc->n_pebs == 1) {
> - cpuc->pebs_data_cfg = 0;
> - cpuc->pebs_record_size = sizeof(struct pebs_basic);
> - }
> -
> pebs_data_cfg = pebs_update_adaptive_cfg(event);
> -
> - /* Update pebs_record_size if new event requires more data. */
> - if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
> - cpuc->pebs_data_cfg |= pebs_data_cfg;
> - adaptive_pebs_record_size_update();
> - update = true;
> - }
> + /*
> + * Be sure to update the thresholds when we change the record.
> + */
> + if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
> + cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
> }
> -
> - if (update)
> - pebs_update_threshold(cpuc);
> }
>
> void intel_pmu_pebs_add(struct perf_event *event)
> @@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
> wrmsrl(base + idx, value);
> }
>
> +static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
> +{
> + if (cpuc->n_pebs == cpuc->n_large_pebs &&
> + cpuc->n_pebs != cpuc->n_pebs_via_pt)
> + intel_pmu_drain_pebs_buffer();
> +}
> +
> void intel_pmu_pebs_enable(struct perf_event *event)
> {
> struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> + u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
> struct hw_perf_event *hwc = &event->hw;
> struct debug_store *ds = cpuc->ds;
> unsigned int idx = hwc->idx;
> @@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)
>
> if (x86_pmu.intel_cap.pebs_baseline) {
> hwc->config |= ICL_EVENTSEL_ADAPTIVE;
> - if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
> - wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
> - cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
> + if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
> + /*
> + * drain_pebs() assumes uniform record size;
> + * hence we need to drain when changing said
> + * size.
> + */
> + intel_pmu_drain_large_pebs(cpuc);
> + adaptive_pebs_record_size_update();
> + wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
> + cpuc->active_pebs_data_cfg = pebs_data_cfg;
> }
> }
> + if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
> + cpuc->pebs_data_cfg = pebs_data_cfg;
> + pebs_update_threshold(cpuc);
> + }
>
> if (idx >= INTEL_PMC_IDX_FIXED) {
> if (x86_pmu.intel_cap.pebs_format < 5)
> @@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
> struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> struct hw_perf_event *hwc = &event->hw;
>
> - if (cpuc->n_pebs == cpuc->n_large_pebs &&
> - cpuc->n_pebs != cpuc->n_pebs_via_pt)
> - intel_pmu_drain_pebs_buffer();
> + intel_pmu_drain_large_pebs(cpuc);
>
> cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
>
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index 8fc15ed5e60b..abf09882f58b 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -121,6 +121,9 @@
> #define PEBS_DATACFG_LBRS BIT_ULL(3)
> #define PEBS_DATACFG_LBR_SHIFT 24
>
> +/* Steal the highest bit of pebs_data_cfg for SW usage */
> +#define PEBS_UPDATE_DS_SW BIT_ULL(63)
> +
> /*
> * Intel "Architectural Performance Monitoring" CPUID
> * detection/enumeration details:

2023-04-27 06:34:20

by Yujie Liu

[permalink] [raw]
Subject: Re: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

Hi Peter,

On Tue, Apr 25, 2023 at 01:19:55PM +0200, Peter Zijlstra wrote:
> On Tue, Apr 25, 2023 at 03:16:29PM +0800, kernel test robot wrote:
> > Hello,
> >
> > kernel test robot noticed "Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler" on:
> >
> > commit: a17c97370d1fb9b2eac75c85136a1f70ec44eded ("[PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update")
> > url: https://github.com/intel-lab-lkp/linux/commits/kan-liang-linux-intel-com/perf-x86-intel-ds-Delay-the-threshold-update/20230422-024743
> > base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git 15def34e2635ab7e0e96f1bc32e1b69609f14942
> > patch link: https://lore.kernel.org/all/[email protected]/
> > patch subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update
> >
>
> > [ 224.064675][ C84] mce: CPUs not responding to MCE broadcast (may include false positives): 0-83,85-223
> > [ 224.064681][ C84] Kernel panic - not syncing: Timeout: Not all CPUs entered broadcast exception handler
> > [ 225.089881][ C84] Shutting down cpus with NMI
> > [ 225.129381][ C84] Kernel Offset: disabled
>
> That seems very unrelated to the patch at hand; was this bisect double
> checked?

We rechecked this case and the bisect should be valid.

We also ran this test on different platforms. The issue can be
reproduced on Sapphire Rapids and Ice Lake. The same thing is they
have a similar "split lock detection" warning when the test started,
but with different dmesg after that. The issue cannot be reproduced
on Cascade Lake.

Another thing to note is that the patchset was applied on commit
15def34e2635 of tip tree (the head of perf/core branch then). Not sure
if this is the correct base to test it.

Please check the details as below:

== Sapphire Rapids ==

# tail of dmesg

[ 217.823350][T24250] x86/split lock detection: #AC: qemu-system-x86/24250 took a split_lock trap at address: 0x1e3
[ 222.633517][ C218] mce: CPUs not responding to MCE broadcast (may include false positives): 0-217,219-223
[ 222.633523][ C218] Kernel panic - not syncing: Timeout: Not all CPUs entered broadcast exception handler
[ 223.663277][ C218] Shutting down cpus with NMI
[ 223.704639][ C218] Kernel Offset: disabled
[ 232.753628][ C218] pstore: backend (erst) writing error (-28)

# result comparison

tbox_group/testcase/rootfs/kconfig/compiler:
lkp-spr-2sp1/kvm-unit-tests-qemu/debian-11.1-x86_64-20220510.cgz/x86_64-rhel-8.3-kvm/gcc-11

commit:
1283dc16325e3 ("perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable")
a17c97370d1fb ("perf/x86/intel/ds: Delay the threshold update")

1283dc16325e3b9d a17c97370d1fb9b2eac75c85136
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
:6 83% 5:6 dmesg.Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler


== Ice Lake ==

# tail of dmesg

[ 234.463750][T23542] x86/split lock detection: #AC: qemu-system-x86/23542 took a split_lock trap at address: 0x1e3
[ 261.554115][ C48] watchdog: BUG: soft lockup - CPU#48 stuck for 26s! [kworker/48:1:933]
[ 261.563287][ C48] Modules linked in: kvm_intel kvm irqbypass btrfs blake2b_generic xor raid6_pq zstd_compress libcrc32c sd_mod t10_pi crc64_rocksoft_generic crc64_rocksoft crc64 sg intel_rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp coretemp crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel ipmi_ssif sha512_ssse3 rapl ast ahci drm_shmem_helper libahci drm_kms_helper intel_cstate acpi_ipmi mei_me ipmi_si syscopyarea ioatdma sysfillrect ipmi_devintf libata sysimgblt mei intel_uncore wmi dca joydev ipmi_msghandler acpi_pad acpi_power_meter drm fuse ip_tables [last unloaded: irqbypass]
[ 261.622458][ C48] CPU: 48 PID: 933 Comm: kworker/48:1 Not tainted 6.3.0-rc3-00006-ga17c97370d1f #1
[ 261.632557][ C48] Workqueue: events jump_label_update_timeout
[ 261.639444][ C48] RIP: 0010:smp_call_function_many_cond (kernel/smp.c:442 kernel/smp.c:987)
[ 261.646846][ C48] Code: 38 c8 7c 08 84 c9 0f 85 16 05 00 00 8b 45 08 a8 01 74 2e 48 89 f1 49 89 f7 48 c1 e9 03 41 83 e7 07 4c 01 f1 41 83 c7 03 f3 90 <0f> b6 01 41 38 c7 7c 08 84 c0 0f 85 20 04 00 00 8b 45 08 a8 01 75
All code
========
0: 38 c8 cmp %cl,%al
2: 7c 08 jl 0xc
4: 84 c9 test %cl,%cl
6: 0f 85 16 05 00 00 jne 0x522
c: 8b 45 08 mov 0x8(%rbp),%eax
f: a8 01 test $0x1,%al
11: 74 2e je 0x41
13: 48 89 f1 mov %rsi,%rcx
16: 49 89 f7 mov %rsi,%r15
19: 48 c1 e9 03 shr $0x3,%rcx
1d: 41 83 e7 07 and $0x7,%r15d
21: 4c 01 f1 add %r14,%rcx
24: 41 83 c7 03 add $0x3,%r15d
28: f3 90 pause
2a:* 0f b6 01 movzbl (%rcx),%eax <-- trapping instruction
2d: 41 38 c7 cmp %al,%r15b
30: 7c 08 jl 0x3a
32: 84 c0 test %al,%al
34: 0f 85 20 04 00 00 jne 0x45a
3a: 8b 45 08 mov 0x8(%rbp),%eax
3d: a8 01 test $0x1,%al
3f: 75 .byte 0x75

Code starting with the faulting instruction
===========================================
0: 0f b6 01 movzbl (%rcx),%eax
3: 41 38 c7 cmp %al,%r15b
6: 7c 08 jl 0x10
8: 84 c0 test %al,%al
a: 0f 85 20 04 00 00 jne 0x430
10: 8b 45 08 mov 0x8(%rbp),%eax
13: a8 01 test $0x1,%al
15: 75 .byte 0x75
[ 261.668227][ C48] RSP: 0018:ffa000000af07be8 EFLAGS: 00000202
[ 261.675122][ C48] RAX: 0000000000000011 RBX: ff1100202043aa40 RCX: ffe21c04041980ed
[ 261.683923][ C48] RDX: 0000000000000061 RSI: ff11002020cc0768 RDI: ffffffff841ecbc8
[ 261.692703][ C48] RBP: ff11002020cc0760 R08: 0000000000000000 R09: ff11001083a9b867
[ 261.701490][ C48] R10: ffe21c021075370c R11: 0000000000000001 R12: ffe21c0404087549
[ 261.710264][ C48] R13: ffe21c0404087548 R14: dffffc0000000000 R15: 0000000000000003
[ 261.719023][ C48] FS: 0000000000000000(0000) GS:ff11002020400000(0000) knlGS:0000000000000000
[ 261.728751][ C48] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 261.736129][ C48] CR2: 00007f41400060a8 CR3: 000000207e246002 CR4: 0000000000773ee0
[ 261.744887][ C48] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 261.753644][ C48] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 261.762385][ C48] PKRU: 55555554
[ 261.766683][ C48] Call Trace:
[ 261.770714][ C48] <TASK>
[ 261.774389][ C48] ? optimize_nops (arch/x86/kernel/alternative.c:1764)
[ 261.779882][ C48] on_each_cpu_cond_mask (arch/x86/include/asm/preempt.h:85 kernel/smp.c:1156)
[ 261.785707][ C48] text_poke_bp_batch (include/linux/smp.h:71 arch/x86/kernel/alternative.c:1770 arch/x86/kernel/alternative.c:1970)
[ 261.791446][ C48] ? do_sync_core (arch/x86/kernel/alternative.c:1940)
WARNING! Modules path isn't set, but is needed to parse this symbol
[ 261.796644][ C48] ? kvm_irq_delivery_to_apic+0x31c/0x890 kvm
[ 261.803629][ C48] ? __cond_resched (kernel/sched/core.c:8486)
[ 261.808993][ C48] ? mutex_lock (arch/x86/include/asm/atomic64_64.h:190 include/linux/atomic/atomic-long.h:443 include/linux/atomic/atomic-instrumented.h:1781 kernel/locking/mutex.c:171 kernel/locking/mutex.c:285)
[ 261.813986][ C48] ? __mutex_lock_slowpath (kernel/locking/mutex.c:282)
[ 261.819917][ C48] ? kernel_text_address (kernel/extable.c:119 kernel/extable.c:94)
[ 261.825681][ C48] text_poke_finish (arch/x86/kernel/alternative.c:2162 arch/x86/kernel/alternative.c:2158 arch/x86/kernel/alternative.c:2168)
[ 261.830991][ C48] arch_jump_label_transform_apply (arch/x86/kernel/jump_label.c:147)
[ 261.837598][ C48] __static_key_slow_dec_cpuslocked+0x3d/0x50
[ 261.844899][ C48] jump_label_update_timeout (kernel/jump_label.c:292)
[ 261.850966][ C48] process_one_work (kernel/workqueue.c:2395)
[ 261.856506][ C48] worker_thread (include/linux/list.h:292 kernel/workqueue.c:2538)
[ 261.861697][ C48] ? __kthread_parkme (arch/x86/include/asm/bitops.h:207 (discriminator 4) arch/x86/include/asm/bitops.h:239 (discriminator 4) include/asm-generic/bitops/instrumented-non-atomic.h:142 (discriminator 4) kernel/kthread.c:270 (discriminator 4))
[ 261.867219][ C48] ? schedule (arch/x86/include/asm/preempt.h:85 (discriminator 1) kernel/sched/core.c:6699 (discriminator 1))
[ 261.872116][ C48] ? process_one_work (kernel/workqueue.c:2480)
[ 261.877879][ C48] ? process_one_work (kernel/workqueue.c:2480)
[ 261.883622][ C48] kthread (kernel/kthread.c:376)
[ 261.888220][ C48] ? kthread_complete_and_exit (kernel/kthread.c:331)
[ 261.894381][ C48] ret_from_fork (arch/x86/entry/entry_64.S:314)
[ 261.899315][ C48] </TASK>
[ 261.902834][ C48] Kernel panic - not syncing: softlockup: hung tasks
[ 261.909991][ C48] CPU: 48 PID: 933 Comm: kworker/48:1 Tainted: G L 6.3.0-rc3-00006-ga17c97370d1f #1
[ 261.921226][ C48] Workqueue: events jump_label_update_timeout
[ 261.927764][ C48] Call Trace:
[ 261.931505][ C48] <IRQ>
[ 261.934778][ C48] dump_stack_lvl (lib/dump_stack.c:107 (discriminator 1))
[ 261.939688][ C48] panic (kernel/panic.c:340)
[ 261.943981][ C48] ? panic_smp_self_stop+0x70/0x70
[ 261.949482][ C48] ? add_taint (arch/x86/include/asm/bitops.h:60 include/asm-generic/bitops/instrumented-atomic.h:29 kernel/panic.c:539)
[ 261.954103][ C48] watchdog_timer_fn (kernel/watchdog.c:435)
[ 261.959397][ C48] ? lockup_detector_update_enable (kernel/watchdog.c:355)
[ 261.965721][ C48] __hrtimer_run_queues (kernel/time/hrtimer.c:1685 kernel/time/hrtimer.c:1749)
[ 261.971263][ C48] ? sched_clock_cpu (kernel/sched/clock.c:384)
[ 261.976454][ C48] ? enqueue_hrtimer (kernel/time/hrtimer.c:1719)
[ 261.981725][ C48] ? _raw_read_unlock_irqrestore (kernel/locking/spinlock.c:161)
[ 261.987864][ C48] ? ktime_get_update_offsets_now (kernel/time/timekeeping.c:292 (discriminator 3) kernel/time/timekeeping.c:388 (discriminator 3) kernel/time/timekeeping.c:2320 (discriminator 3))
[ 261.994181][ C48] hrtimer_interrupt (kernel/time/hrtimer.c:1814)
[ 261.999450][ C48] __sysvec_apic_timer_interrupt (arch/x86/include/asm/jump_label.h:27 include/linux/jump_label.h:207 arch/x86/include/asm/trace/irq_vectors.h:41 arch/x86/kernel/apic/apic.c:1114)
[ 262.005670][ C48] sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1107 (discriminator 14))
[ 262.011616][ C48] </IRQ>
[ 262.014869][ C48] <TASK>
[ 262.018126][ C48] asm_sysvec_apic_timer_interrupt (arch/x86/include/asm/idtentry.h:645)
[ 262.024418][ C48] RIP: 0010:smp_call_function_many_cond (kernel/smp.c:442 kernel/smp.c:987)
[ 262.031329][ C48] Code: 38 c8 7c 08 84 c9 0f 85 16 05 00 00 8b 45 08 a8 01 74 2e 48 89 f1 49 89 f7 48 c1 e9 03 41 83 e7 07 4c 01 f1 41 83 c7 03 f3 90 <0f> b6 01 41 38 c7 7c 08 84 c0 0f 85 20 04 00 00 8b 45 08 a8 01 75
All code
========
0: 38 c8 cmp %cl,%al
2: 7c 08 jl 0xc
4: 84 c9 test %cl,%cl
6: 0f 85 16 05 00 00 jne 0x522
c: 8b 45 08 mov 0x8(%rbp),%eax
f: a8 01 test $0x1,%al
11: 74 2e je 0x41
13: 48 89 f1 mov %rsi,%rcx
16: 49 89 f7 mov %rsi,%r15
19: 48 c1 e9 03 shr $0x3,%rcx
1d: 41 83 e7 07 and $0x7,%r15d
21: 4c 01 f1 add %r14,%rcx
24: 41 83 c7 03 add $0x3,%r15d
28: f3 90 pause
2a:* 0f b6 01 movzbl (%rcx),%eax <-- trapping instruction
2d: 41 38 c7 cmp %al,%r15b
30: 7c 08 jl 0x3a
32: 84 c0 test %al,%al
34: 0f 85 20 04 00 00 jne 0x45a
3a: 8b 45 08 mov 0x8(%rbp),%eax
3d: a8 01 test $0x1,%al
3f: 75 .byte 0x75

Code starting with the faulting instruction
===========================================
0: 0f b6 01 movzbl (%rcx),%eax
3: 41 38 c7 cmp %al,%r15b
6: 7c 08 jl 0x10
8: 84 c0 test %al,%al
a: 0f 85 20 04 00 00 jne 0x430
10: 8b 45 08 mov 0x8(%rbp),%eax
13: a8 01 test $0x1,%al
15: 75 .byte 0x75
[ 262.051777][ C48] RSP: 0018:ffa000000af07be8 EFLAGS: 00000202
[ 262.058210][ C48] RAX: 0000000000000011 RBX: ff1100202043aa40 RCX: ffe21c04041980ed
[ 262.066559][ C48] RDX: 0000000000000061 RSI: ff11002020cc0768 RDI: ffffffff841ecbc8
[ 262.074915][ C48] RBP: ff11002020cc0760 R08: 0000000000000000 R09: ff11001083a9b867
[ 262.083270][ C48] R10: ffe21c021075370c R11: 0000000000000001 R12: ffe21c0404087549
[ 262.091623][ C48] R13: ffe21c0404087548 R14: dffffc0000000000 R15: 0000000000000003
[ 262.099983][ C48] ? smp_call_function_many_cond (kernel/smp.c:983 (discriminator 1))
[ 262.106355][ C48] ? optimize_nops (arch/x86/kernel/alternative.c:1764)
[ 262.111513][ C48] on_each_cpu_cond_mask (arch/x86/include/asm/preempt.h:85 kernel/smp.c:1156)
[ 262.117014][ C48] text_poke_bp_batch (include/linux/smp.h:71 arch/x86/kernel/alternative.c:1770 arch/x86/kernel/alternative.c:1970)
[ 262.122431][ C48] ? do_sync_core (arch/x86/kernel/alternative.c:1940)
WARNING! Modules path isn't set, but is needed to parse this symbol
[ 262.127320][ C48] ? kvm_irq_delivery_to_apic+0x31c/0x890 kvm
[ 262.133997][ C48] ? __cond_resched (kernel/sched/core.c:8486)
[ 262.139055][ C48] ? mutex_lock (arch/x86/include/asm/atomic64_64.h:190 include/linux/atomic/atomic-long.h:443 include/linux/atomic/atomic-instrumented.h:1781 kernel/locking/mutex.c:171 kernel/locking/mutex.c:285)
[ 262.143768][ C48] ? __mutex_lock_slowpath (kernel/locking/mutex.c:282)
[ 262.149438][ C48] ? kernel_text_address (kernel/extable.c:119 kernel/extable.c:94)
[ 262.154935][ C48] text_poke_finish (arch/x86/kernel/alternative.c:2162 arch/x86/kernel/alternative.c:2158 arch/x86/kernel/alternative.c:2168)
[ 262.159994][ C48] arch_jump_label_transform_apply (arch/x86/kernel/jump_label.c:147)
[ 262.166369][ C48] __static_key_slow_dec_cpuslocked+0x3d/0x50
[ 262.173444][ C48] jump_label_update_timeout (kernel/jump_label.c:292)
[ 262.179304][ C48] process_one_work (kernel/workqueue.c:2395)
[ 262.184641][ C48] worker_thread (include/linux/list.h:292 kernel/workqueue.c:2538)
[ 262.189631][ C48] ? __kthread_parkme (arch/x86/include/asm/bitops.h:207 (discriminator 4) arch/x86/include/asm/bitops.h:239 (discriminator 4) include/asm-generic/bitops/instrumented-non-atomic.h:142 (discriminator 4) kernel/kthread.c:270 (discriminator 4))
[ 262.194965][ C48] ? schedule (arch/x86/include/asm/preempt.h:85 (discriminator 1) kernel/sched/core.c:6699 (discriminator 1))
[ 262.199676][ C48] ? process_one_work (kernel/workqueue.c:2480)
[ 262.205247][ C48] ? process_one_work (kernel/workqueue.c:2480)
[ 262.210810][ C48] kthread (kernel/kthread.c:376)
[ 262.215250][ C48] ? kthread_complete_and_exit (kernel/kthread.c:331)
[ 262.221254][ C48] ret_from_fork (arch/x86/entry/entry_64.S:314)
[ 262.226045][ C48] </TASK>

# result comparison

compiler/kconfig/rootfs/tbox_group/testcase:
gcc-11/x86_64-rhel-8.3-kvm/debian-11.1-x86_64-20220510.cgz/lkp-icl-2sp6/kvm-unit-tests-qemu

commit:
1283dc16325e3 ("perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable")
a17c97370d1fb ("perf/x86/intel/ds: Delay the threshold update")

1283dc16325e3b9d a17c97370d1fb9b2eac75c85136
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
:6 100% 6:6 dmesg.BUG:soft_lockup-CPU##stuck_for#s![kworker:#:#]
:6 83% 5:6 dmesg.Kernel_panic-not_syncing:softlockup:hung_tasks
:6 100% 6:6 dmesg.RIP:smp_call_function_many_cond


--
Best Regards,
Yujie

2023-05-04 09:14:53

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

On Tue, Apr 25, 2023 at 03:16:29PM +0800, kernel test robot wrote:
> Hello,
>
> kernel test robot noticed "Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler" on:
>
> commit: a17c97370d1fb9b2eac75c85136a1f70ec44eded ("[PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update")
> url: https://github.com/intel-lab-lkp/linux/commits/kan-liang-linux-intel-com/perf-x86-intel-ds-Delay-the-threshold-update/20230422-024743
> base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git 15def34e2635ab7e0e96f1bc32e1b69609f14942
> patch link: https://lore.kernel.org/all/[email protected]/
> patch subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update
>
> in testcase: kvm-unit-tests-qemu
>
> compiler: gcc-11
> test machine: 224 threads 2 sockets Intel(R) Xeon(R) Platinum 8480+ (Sapphire Rapids) with 256G memory
>
> (please refer to attached dmesg/kmsg for entire log/backtrace)
>
>
> If you fix the issue, kindly add following tag
> | Reported-by: kernel test robot <[email protected]>
> | Link: https://lore.kernel.org/oe-lkp/[email protected]
>
>
> [ 179.997768][ T2324] 2023-04-23 15:30:10 ./run_tests.sh
> ...
> [ 180.442663][T20263] x86/split lock detection: #AC: qemu-system-x86/20263 took a split_lock trap at address: 0x1e3
> [ 182.372875][ T2324]

I'm going to try and replicate this on my ADL which should have
split_lock crud on. How does one run this kvm-unit-test stuff, inside
kvm or on bare metal?

2023-05-04 10:39:44

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update

On Thu, May 04, 2023 at 11:11:23AM +0200, Peter Zijlstra wrote:
> On Tue, Apr 25, 2023 at 03:16:29PM +0800, kernel test robot wrote:
> > Hello,
> >
> > kernel test robot noticed "Kernel_panic-not_syncing:Timeout:Not_all_CPUs_entered_broadcast_exception_handler" on:
> >
> > commit: a17c97370d1fb9b2eac75c85136a1f70ec44eded ("[PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update")
> > url: https://github.com/intel-lab-lkp/linux/commits/kan-liang-linux-intel-com/perf-x86-intel-ds-Delay-the-threshold-update/20230422-024743
> > base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git 15def34e2635ab7e0e96f1bc32e1b69609f14942
> > patch link: https://lore.kernel.org/all/[email protected]/
> > patch subject: [PATCH V4 2/2] perf/x86/intel/ds: Delay the threshold update
> >
> > in testcase: kvm-unit-tests-qemu
> >
> > compiler: gcc-11
> > test machine: 224 threads 2 sockets Intel(R) Xeon(R) Platinum 8480+ (Sapphire Rapids) with 256G memory
> >
> > (please refer to attached dmesg/kmsg for entire log/backtrace)
> >
> >
> > If you fix the issue, kindly add following tag
> > | Reported-by: kernel test robot <[email protected]>
> > | Link: https://lore.kernel.org/oe-lkp/[email protected]
> >
> >
> > [ 179.997768][ T2324] 2023-04-23 15:30:10 ./run_tests.sh
> > ...
> > [ 180.442663][T20263] x86/split lock detection: #AC: qemu-system-x86/20263 took a split_lock trap at address: 0x1e3
> > [ 182.372875][ T2324]
>
> I'm going to try and replicate this on my ADL which should have
> split_lock crud on. How does one run this kvm-unit-test stuff, inside
> kvm or on bare metal?

Bare metal it is..

Anyway, on the ADL I can see the split lock thing with or without the
patches in question so that can't be it.

2023-05-10 13:36:58

by tip-bot2 for Jacob Pan

[permalink] [raw]
Subject: [tip: perf/urgent] perf/x86/intel/ds: Flush PEBS DS when changing PEBS_DATA_CFG

The following commit has been merged into the perf/urgent branch of tip:

Commit-ID: b752ea0c28e3f7f0aaaad6abf84f735eebc37a60
Gitweb: https://git.kernel.org/tip/b752ea0c28e3f7f0aaaad6abf84f735eebc37a60
Author: Kan Liang <[email protected]>
AuthorDate: Fri, 21 Apr 2023 11:45:28 -07:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Mon, 08 May 2023 10:58:27 +02:00

perf/x86/intel/ds: Flush PEBS DS when changing PEBS_DATA_CFG

Several similar kernel warnings can be triggered,

[56605.607840] CPU0 PEBS record size 0, expected 32, config 0 cpuc->record_size=208

when the below commands are running in parallel for a while on SPR.

while true;
do
perf record --no-buildid -a --intr-regs=AX \
-e cpu/event=0xd0,umask=0x81/pp \
-c 10003 -o /dev/null ./triad;
done &

while true;
do
perf record -o /tmp/out -W -d \
-e '{ld_blocks.store_forward:period=1000000, \
MEM_TRANS_RETIRED.LOAD_LATENCY:u:precise=2:ldlat=4}' \
-c 1037 ./triad;
done

The triad program is just the generation of loads/stores.

The warnings are triggered when an unexpected PEBS record (with a
different config and size) is found.

A system-wide PEBS event with the large PEBS config may be enabled
during a context switch. Some PEBS records for the system-wide PEBS
may be generated while the old task is sched out but the new one
hasn't been sched in yet. When the new task is sched in, the
cpuc->pebs_record_size may be updated for the per-task PEBS events. So
the existing system-wide PEBS records have a different size from the
later PEBS records.

The PEBS buffer should be flushed right before the hardware is
reprogrammed. The new size and threshold should be updated after the
old buffer has been flushed.

Reported-by: Stephane Eranian <[email protected]>
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/events/intel/ds.c | 56 +++++++++++++++++-------------
arch/x86/include/asm/perf_event.h | 3 ++-
2 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a2e566e..df88576 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
+
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
- bool update = cpuc->n_pebs == 1;
+ if (cpuc->n_pebs == 1)
+ cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;

if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);

- update = true;
+ cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}

/*
@@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;

- /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
- if (cpuc->n_pebs == 1) {
- cpuc->pebs_data_cfg = 0;
- cpuc->pebs_record_size = sizeof(struct pebs_basic);
- }
-
pebs_data_cfg = pebs_update_adaptive_cfg(event);
-
- /* Update pebs_record_size if new event requires more data. */
- if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
- cpuc->pebs_data_cfg |= pebs_data_cfg;
- adaptive_pebs_record_size_update();
- update = true;
- }
+ /*
+ * Be sure to update the thresholds when we change the record.
+ */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
+ cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
-
- if (update)
- pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
@@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}

+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
+ intel_pmu_drain_pebs_buffer();
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
unsigned int idx = hwc->idx;
@@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)

if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
- if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
- wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
- cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ /*
+ * drain_pebs() assumes uniform record size;
+ * hence we need to drain when changing said
+ * size.
+ */
+ intel_pmu_drain_large_pebs(cpuc);
+ adaptive_pebs_record_size_update();
+ wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+ cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
+ if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
+ cpuc->pebs_data_cfg = pebs_data_cfg;
+ pebs_update_threshold(cpuc);
+ }

if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
@@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

- if (cpuc->n_pebs == cpuc->n_large_pebs &&
- cpuc->n_pebs != cpuc->n_pebs_via_pt)
- intel_pmu_drain_pebs_buffer();
+ intel_pmu_drain_large_pebs(cpuc);

cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc15ed..abf0988 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -121,6 +121,9 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24

+/* Steal the highest bit of pebs_data_cfg for SW usage */
+#define PEBS_UPDATE_DS_SW BIT_ULL(63)
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details: