[Patch v7 24/24] perf/x86/intel: Add sanity check for PEBS fragment size

From: Dapeng Mi

Date: Mon Mar 23 2026 - 20:53:01 EST


Prevent potential infinite loops by adding a sanity check for
corrupted PEBS fragment sizes, which could occur in theory.

If a corrupted PEBS fragment is detected, the entire PEBS record
containing that fragment, as well as all subsequent records in the
buffer, is discarded. This preserves the integrity of the remaining
PEBS data and prevents setup_arch_pebs_sample_data() from looping
forever on a bogus fragment size.

Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
---

V7: new patch.

arch/x86/events/intel/ds.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 6e1c516122c0..4b0dd8379737 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2819,7 +2819,7 @@ static void setup_arch_pebs_sample_data(struct perf_event *event,
}

/* Parse followed fragments if there are. */
- if (arch_pebs_record_continued(header)) {
+ if (arch_pebs_record_continued(header) && header->size) {
at = at + header->size;
goto again;
}
@@ -2948,13 +2948,17 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
struct pt_regs *iregs,
struct pt_regs *regs,
struct perf_sample_data *data,
- void *at,
- int count,
+ void *at, int count, bool corrupted,
setup_fn setup_sample)
{
struct hw_perf_event *hwc = &event->hw;

- setup_sample(event, iregs, at, data, regs);
+ /* Skip parsing corrupted PEBS record. */
+ if (corrupted)
+ perf_sample_data_init(data, 0, event->hw.last_period);
+ else
+ setup_sample(event, iregs, at, data, regs);
+
if (iregs == &dummy_iregs) {
/*
* The PEBS records may be drained in the non-overflow context,
@@ -3026,13 +3030,15 @@ __intel_pmu_pebs_events(struct perf_event *event,
iregs = &dummy_iregs;

while (cnt > 1) {
- __intel_pmu_pebs_event(event, iregs, regs, data, at, setup_sample);
+ __intel_pmu_pebs_event(event, iregs, regs, data,
+ at, setup_sample);
at += cpuc->pebs_record_size;
at = get_next_pebs_record_by_bit(at, top, bit);
cnt--;
}

- __intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sample);
+ __intel_pmu_pebs_last_event(event, iregs, regs, data, at,
+ count, false, setup_sample);
}

static int intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
@@ -3247,7 +3253,8 @@ static __always_inline void
__intel_pmu_handle_last_pebs_record(struct pt_regs *iregs,
struct pt_regs *regs,
struct perf_sample_data *data,
- u64 mask, short *counts, void **last,
+ u64 mask, short *counts,
+ void **last, bool corrupted,
setup_fn setup_sample)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -3261,7 +3268,7 @@ __intel_pmu_handle_last_pebs_record(struct pt_regs *iregs,
event = cpuc->events[bit];

__intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit],
- counts[bit], setup_sample);
+ counts[bit], corrupted, setup_sample);
}

}
@@ -3317,7 +3324,7 @@ static int intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_da
}

__intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last,
- setup_pebs_adaptive_sample_data);
+ false, setup_pebs_adaptive_sample_data);

return hweight64(events_bitmap);
}
@@ -3333,6 +3340,7 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
struct pt_regs *regs = &perf_regs->regs;
void *base, *at, *top;
u64 events_bitmap = 0;
+ bool corrupted = false;
u64 mask;

rdmsrq(MSR_IA32_PEBS_INDEX, index.whole);
@@ -3388,6 +3396,10 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
if (!header->size)
break;
at += header->size;
+ if (WARN_ON_ONCE(at >= top)) {
+ corrupted = true;
+ goto done;
+ }
header = at;
}

@@ -3395,8 +3407,9 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs *iregs,
at += header->size;
}

+done:
__intel_pmu_handle_last_pebs_record(iregs, regs, data, mask,
- counts, last,
+ counts, last, corrupted,
setup_arch_pebs_sample_data);

return hweight64(events_bitmap);
--
2.34.1