[PATCH v2 5/9] mm/huge_memory: add a common exit path to zap_huge_pmd()

From: Lorenzo Stoakes (Oracle)

Date: Thu Mar 19 2026 - 09:05:22 EST


Other than on the path where we fail to acquire the PTL, we always need to
unlock the PTL, and optionally need to flush on exit.

The code currently duplicates this logic across the exit paths, so default
flush_needed to false, set it to true in the one case that requires it, and
share the same exit logic across all paths.

This also makes flush_needed more meaningful as a function-scope value (we
don't need to flush in the PFN map/mixed map, huge zero page, or error cases,
for instance).

Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
---
mm/huge_memory.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a2f87315195d..c84b30461cc5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2431,7 +2431,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
{
struct folio *folio = NULL;
- bool flush_needed = true;
+ bool flush_needed = false;
spinlock_t *ptl;
pmd_t orig_pmd;

@@ -2453,19 +2453,18 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- spin_unlock(ptl);
- return true;
+ goto out;
}
if (is_huge_zero_pmd(orig_pmd)) {
if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- spin_unlock(ptl);
- return true;
+ goto out;
}

if (pmd_present(orig_pmd)) {
struct page *page = pmd_page(orig_pmd);

+ flush_needed = true;
folio = page_folio(page);
folio_remove_rmap_pmd(folio, page, vma);
WARN_ON_ONCE(folio_mapcount(folio) < 0);
@@ -2474,14 +2473,12 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
const softleaf_t entry = softleaf_from_pmd(orig_pmd);

folio = softleaf_to_folio(entry);
- flush_needed = false;

if (!thp_migration_supported())
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
} else {
WARN_ON_ONCE(true);
- spin_unlock(ptl);
- return true;
+ goto out;
}

if (folio_test_anon(folio)) {
@@ -2508,10 +2505,10 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
folio_put(folio);
}

+out:
spin_unlock(ptl);
if (flush_needed)
tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
-
return true;
}

--
2.53.0