[LSF/MM/BPF TOPIC][RFC PATCH 2/2] mm/hugetlb: skip hugetlb shrinking for proactive reclaim
From: Sourav Panda
Date: Wed Mar 18 2026 - 19:42:18 EST
Scan control can indicate whether we are in proactive reclaim mode. Pass
that flag down to shrink control and skip shrinking of frozen hugetlb
memory when it is set.
Signed-off-by: Sourav Panda <souravpanda@xxxxxxxxxx>
---
include/linux/shrinker.h |  1 +
mm/hugetlb.c             |  6 ++++++
mm/internal.h            |  2 +-
mm/shrinker.c            | 10 ++++++----
mm/vmscan.c              |  6 +++---
5 files changed, 17 insertions(+), 8 deletions(-)
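For context: sc->proactive is not introduced here; it is already set on the
memory.reclaim path in current mainline. Writes to the cgroup v2
memory.reclaim file reach try_to_free_mem_cgroup_pages() with
MEMCG_RECLAIM_PROACTIVE set, and vmscan records it roughly as follows
(simplified sketch of existing mainline mm/vmscan.c, most fields omitted):

	struct scan_control sc = {
		.nr_to_reclaim	= max(nr_pages, SWAP_CLUSTER_MAX),
		/* set only for memory.reclaim-driven (proactive) reclaim */
		.proactive	= !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
		/* remaining fields unchanged */
	};

The new shrink_control::proactive member below simply carries that existing
state one level further down, so individual shrinkers such as the hugetlb
one can opt out of proactive reclaim.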
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 5374c251ee9e..973d5fd68803 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -52,6 +52,7 @@ struct shrink_control {
unsigned long nr_scanned;
s8 priority;
+ bool proactive;
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d4953ff1dda1..a70aed7c8665 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4145,6 +4145,9 @@ static unsigned long hugepage_shrinker_count(struct shrinker *s,
if (sc->priority >= DEF_PRIORITY - 6)
return 0;
+ if (sc->proactive)
+ return 0;
+
if (!gigantic_page_runtime_supported())
return 0;
@@ -4193,6 +4196,9 @@ static unsigned long hugepage_shrinker_scan(struct shrinker *s,
if (sc->nr_to_scan == 0)
return SHRINK_STOP;
+ if (sc->proactive)
+ return SHRINK_STOP;
+
if (!gigantic_page_runtime_supported())
return SHRINK_STOP;
diff --git a/mm/internal.h b/mm/internal.h
index cb0af847d7d9..cccb68d723d4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1660,7 +1660,7 @@ void __meminit __init_page_from_nid(unsigned long pfn, int nid);
/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
- int priority);
+ int priority, bool proactive);
int shmem_add_to_page_cache(struct folio *folio,
struct address_space *mapping,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 8a7a05182465..21b8f0b9d092 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -467,7 +467,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
- struct mem_cgroup *memcg, int priority)
+ struct mem_cgroup *memcg, int priority, bool proactive)
{
struct shrinker_info *info;
unsigned long ret, freed = 0;
@@ -530,6 +530,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
.nid = nid,
.memcg = memcg,
.priority = priority,
+ .proactive = proactive,
};
struct shrinker *shrinker;
int shrinker_id = calc_shrinker_id(index, offset);
@@ -586,7 +587,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
}
#else /* !CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
- struct mem_cgroup *memcg, int priority)
+ struct mem_cgroup *memcg, int priority, bool proactive)
{
return 0;
}
@@ -613,7 +614,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
* Returns the number of reclaimed slab objects.
*/
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
- int priority)
+ int priority, bool proactive)
{
unsigned long ret, freed = 0;
struct shrinker *shrinker;
@@ -626,7 +627,7 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
* oom.
*/
if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
- return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
+ return shrink_slab_memcg(gfp_mask, nid, memcg, priority, proactive);
/*
* lockless algorithm of global shrink.
@@ -656,6 +657,7 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
.nid = nid,
.memcg = memcg,
.priority = priority,
+ .proactive = proactive,
};
if (!shrinker_try_get(shrinker))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fc9373e8251..39151d1edeff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -432,7 +432,7 @@ static unsigned long drop_slab_node(int nid)
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
+ freed += shrink_slab(GFP_KERNEL, nid, memcg, 0, false);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
return freed;
@@ -4925,7 +4925,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
success = try_to_shrink_lruvec(lruvec, sc);
- shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
+ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority, sc->proactive);
if (!sc->proactive)
vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
@@ -6020,7 +6020,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
shrink_lruvec(lruvec, sc);
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
- sc->priority);
+ sc->priority, sc->proactive);
/* Record the group's reclaim efficiency */
if (!sc->proactive)
--
2.53.0.983.g0bb29b3bc5-goog
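With this applied, a userspace proactive reclaimer, e.g. (example cgroup
path):

	echo 512M > /sys/fs/cgroup/example-workload/memory.reclaim

still ages the LRUs and shrinks ordinary slab caches, but no longer frees
pages from the frozen hugetlb pool; only non-proactive reclaim that has
dropped below the existing priority threshold (sc->priority <
DEF_PRIORITY - 6, i.e. significant memory pressure) shrinks it.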