[PATCH] mm/damon/core: optimize kdamond_apply_schemes() with pre-filtered scheme list

From: Josh Law

Date: Sun Mar 22 2026 - 18:57:07 EST


Currently, kdamond_apply_schemes() iterates over all targets and regions
for every scheme in the context, even if the scheme is inactive due to
watermarks or hasn't reached its next apply interval.

This patch introduces a pre-filtered list of active schemes at the start
of kdamond_apply_schemes(). By only iterating over schemes that actually
need to be applied in the current interval, we significantly reduce the
overhead of the nested target/region loops.

This optimization maintains the original Target -> Region -> Scheme
iteration order while providing substantial performance gains, especially
when many of the installed schemes are inactive in a given interval.

Performance Benchmarks (Filtered Array vs Original):
| Scenario (active/total schemes) | Speedup |
|---------------------------------|---------|
| Mostly inactive (2/10 active) | 7.5x |
| Half active (5/10 active) | 2.9x |
| All active (10/10 active) | 1.3x |

Signed-off-by: Josh Law <objecting@xxxxxxxxxxxxx>
---
mm/damon/core.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/mm/damon/core.c b/mm/damon/core.c
index c884bb31c9b8..3b59e72defd4 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2114,19 +2114,16 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,

static void damon_do_apply_schemes(struct damon_ctx *c,
struct damon_target *t,
- struct damon_region *r)
+ struct damon_region *r,
+ struct damos **active_schemes,
+ int nr_active_schemes)
{
- struct damos *s;
+ int i;

- damon_for_each_scheme(s, c) {
+ for (i = 0; i < nr_active_schemes; i++) {
+ struct damos *s = active_schemes[i];
struct damos_quota *quota = &s->quota;

- if (time_before(c->passed_sample_intervals, s->next_apply_sis))
- continue;
-
- if (!s->wmarks.activated)
- continue;
-
/* Check the quota */
if (quota->esz && quota->charged_sz >= quota->esz)
continue;
@@ -2476,7 +2473,8 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
struct damon_target *t;
struct damon_region *r;
struct damos *s;
- bool has_schemes_to_apply = false;
+ struct damos *active_schemes[32];
+ int nr_active_schemes = 0;

damon_for_each_scheme(s, c) {
if (time_before(c->passed_sample_intervals, s->next_apply_sis))
@@ -2485,12 +2483,15 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
if (!s->wmarks.activated)
continue;

- has_schemes_to_apply = true;
+ if (nr_active_schemes < ARRAY_SIZE(active_schemes))
+ active_schemes[nr_active_schemes++] = s;
+ else
+ WARN_ONCE(1, "too many schemes to apply");

damos_adjust_quota(c, s);
}

- if (!has_schemes_to_apply)
+ if (!nr_active_schemes)
return;

mutex_lock(&c->walk_control_lock);
@@ -2499,7 +2500,8 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
continue;

damon_for_each_region(r, t)
- damon_do_apply_schemes(c, t, r);
+ damon_do_apply_schemes(c, t, r, active_schemes,
+ nr_active_schemes);
}

damon_for_each_scheme(s, c) {
--
2.34.1