[PATCH v5 1/5] perf evsel: Improve falling back from cycles

From: Ian Rogers

Date: Tue Mar 17 2026 - 01:54:35 EST


Switch to using evsel__match rather than comparing perf_event_attr
values, this is robust on hybrid architectures.
Ensure evsel->pmu matches evsel->core.attr.
Remove exclude bits that get set in other fallback attempts when
switching the event.
Log the event name with modifiers when switching the event on fallback.

Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
tools/perf/util/evsel.c | 45 ++++++++++++++++++++++++++++-------------
tools/perf/util/evsel.h | 2 ++
2 files changed, 33 insertions(+), 14 deletions(-)

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f59228c1a39e..bd14d9bbc91f 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -3785,25 +3785,42 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
{
int paranoid;

- if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
- evsel->core.attr.type == PERF_TYPE_HARDWARE &&
- evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ if ((err == ENODEV || err == ENOENT || err == ENXIO) &&
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
/*
- * If it's cycles then fall back to hrtimer based cpu-clock sw
- * counter, which is always available even if no PMU support.
- *
- * PPC returns ENXIO until 2.6.37 (behavior changed with commit
- * b0a873e).
+	 * If the legacy hardware cycles event fails then fall back to
+	 * the hrtimer based cpu-clock sw counter, which is always
+	 * available even without PMU support. PPC returned ENXIO rather
+	 * than ENODEV or ENOENT until 2.6.37.
*/
- evsel->core.attr.type = PERF_TYPE_SOFTWARE;
+ evsel->pmu = perf_pmus__find_by_type(PERF_TYPE_SOFTWARE);
+	assert(evsel->pmu); /* software is a "well-known" PMU type that can't fail. */
+
+ /* Configure the event. */
+ evsel->core.attr.type = PERF_TYPE_SOFTWARE;
evsel->core.attr.config = target__has_cpu(target)
? PERF_COUNT_SW_CPU_CLOCK
: PERF_COUNT_SW_TASK_CLOCK;
- scnprintf(msg, msgsize,
- "The cycles event is not supported, trying to fall back to %s",
- target__has_cpu(target) ? "cpu-clock" : "task-clock");
+ evsel->core.is_pmu_core = false;
+
+ /* Remove excludes for new event. */
+ if (evsel->fallenback_eacces) {
+ evsel->core.attr.exclude_kernel = 0;
+ evsel->core.attr.exclude_hv = 0;
+ evsel->fallenback_eacces = false;
+ }
+ if (evsel->fallenback_eopnotsupp) {
+ evsel->core.attr.exclude_guest = 0;
+ evsel->fallenback_eopnotsupp = false;
+ }

+ /* Name is recomputed by evsel__name. */
zfree(&evsel->name);
+
+ /* Log message. */
+ scnprintf(msg, msgsize,
+ "The cycles event is not supported, trying to fall back to %s",
+ evsel__name(evsel));
return true;
} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
(paranoid = perf_event_paranoid()) > 1) {
@@ -3830,7 +3847,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
" samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_hv = 1;
-
+ evsel->fallenback_eacces = true;
return true;
} else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
!evsel->exclude_GH) {
@@ -3851,7 +3868,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
/* Apple M1 requires exclude_guest */
scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
evsel->core.attr.exclude_guest = 1;
-
+ evsel->fallenback_eopnotsupp = true;
return true;
}
no_fallback:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a3d754c029a0..97f57fab28ce 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -124,6 +124,8 @@ struct evsel {
bool default_metricgroup; /* A member of the Default metricgroup */
bool default_show_events; /* If a default group member, show the event */
bool needs_uniquify;
+ bool fallenback_eacces;
+ bool fallenback_eopnotsupp;
struct hashmap *per_pkg_mask;
int err;
int script_output_type;
--
2.53.0.851.ga537e3e6e9-goog