Re: [PATCH 2/4] mm/mprotect: move softleaf code out of the main function
From: Lorenzo Stoakes (Oracle)
Date: Thu Mar 19 2026 - 15:14:04 EST
On Thu, Mar 19, 2026 at 06:31:06PM +0000, Pedro Falcato wrote:
> Move softleaf change_pte_range code into a separate function. This makes
> the change_pte_range() function (or where it inlines) a good bit
> smaller. Plus it lessens cognitive load when reading through the
> function.
>
> Signed-off-by: Pedro Falcato <pfalcato@xxxxxxx>
Honestly I like this as a refactoring, the noinline notwithstanding, so:
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
> ---
> mm/mprotect.c | 128 +++++++++++++++++++++++++++-----------------------
> 1 file changed, 68 insertions(+), 60 deletions(-)
>
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 1bd0d4aa07c2..8d4fa38a8a26 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -211,6 +211,73 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
> commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
> }
>
> +static noinline long change_pte_softleaf(struct vm_area_struct *vma,
> + unsigned long addr, pte_t *pte, pte_t oldpte, unsigned long cp_flags)
> +{
> + bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
> + bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
> + softleaf_t entry = softleaf_from_pte(oldpte);
> + pte_t newpte;
> +
> + if (softleaf_is_migration_write(entry)) {
> + const struct folio *folio = softleaf_to_folio(entry);
> +
> + /*
> + * A protection check is difficult so
> + * just be safe and disable write
> + */
> + if (folio_test_anon(folio))
> + entry = make_readable_exclusive_migration_entry(
> + swp_offset(entry));
> + else
> + entry = make_readable_migration_entry(swp_offset(entry));
> + newpte = swp_entry_to_pte(entry);
> + if (pte_swp_soft_dirty(oldpte))
> + newpte = pte_swp_mksoft_dirty(newpte);
> + } else if (softleaf_is_device_private_write(entry)) {
> + /*
> + * We do not preserve soft-dirtiness. See
> + * copy_nonpresent_pte() for explanation.
> + */
> + entry = make_readable_device_private_entry(
> + swp_offset(entry));
> + newpte = swp_entry_to_pte(entry);
> + if (pte_swp_uffd_wp(oldpte))
> + newpte = pte_swp_mkuffd_wp(newpte);
> + } else if (softleaf_is_marker(entry)) {
> + /*
> + * Ignore error swap entries unconditionally,
> + * because any access should sigbus/sigsegv
> + * anyway.
> + */
> + if (softleaf_is_poison_marker(entry) ||
> + softleaf_is_guard_marker(entry))
> + return 0;
This just continues in the original:
if (softleaf_is_poison_marker(entry) ||
softleaf_is_guard_marker(entry))
continue;
So this is correct.
> + /*
> + * If this is uffd-wp pte marker and we'd like
> + * to unprotect it, drop it; the next page
> + * fault will trigger without uffd trapping.
> + */
> + if (uffd_wp_resolve) {
> + pte_clear(vma->vm_mm, addr, pte);
> + return 1;
This increments pages and continues in the original:
if (uffd_wp_resolve) {
pte_clear(vma->vm_mm, addr, pte);
pages++;
}
continue;
Hmm, actually this one is NOT quite correct: in the original, the marker branch
ends with an unconditional continue, but in the new function, when the entry is a
non-poison/non-guard marker and !uffd_wp_resolve, we fall out of the else-if and
reach the pte_swp_mkuffd_wp()/pte_same() code below with newpte uninitialized —
that's UB and a behavior change. The marker branch needs an unconditional
`return 0;` after the uffd_wp_resolve check.
> + }
> + } else {
> + newpte = oldpte;
> + }
> +
> + if (uffd_wp)
> + newpte = pte_swp_mkuffd_wp(newpte);
> + else if (uffd_wp_resolve)
> + newpte = pte_swp_clear_uffd_wp(newpte);
> +
> + if (!pte_same(oldpte, newpte)) {
> + set_pte_at(vma->vm_mm, addr, pte, newpte);
> + return 1;
This increments pages and is at the end of the loop in the original:
if (!pte_same(oldpte, newpte)) {
set_pte_at(vma->vm_mm, addr, pte, newpte);
pages++;
}
So this is correct.
> + }
> + return 0;
> +}
> +
> static long change_pte_range(struct mmu_gather *tlb,
> struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
> unsigned long end, pgprot_t newprot, unsigned long cp_flags)
> @@ -317,66 +384,7 @@ static long change_pte_range(struct mmu_gather *tlb,
> pages++;
> }
> } else {
> - softleaf_t entry = softleaf_from_pte(oldpte);
> - pte_t newpte;
> -
> - if (softleaf_is_migration_write(entry)) {
> - const struct folio *folio = softleaf_to_folio(entry);
> -
> - /*
> - * A protection check is difficult so
> - * just be safe and disable write
> - */
> - if (folio_test_anon(folio))
> - entry = make_readable_exclusive_migration_entry(
> - swp_offset(entry));
> - else
> - entry = make_readable_migration_entry(swp_offset(entry));
> - newpte = swp_entry_to_pte(entry);
> - if (pte_swp_soft_dirty(oldpte))
> - newpte = pte_swp_mksoft_dirty(newpte);
> - } else if (softleaf_is_device_private_write(entry)) {
> - /*
> - * We do not preserve soft-dirtiness. See
> - * copy_nonpresent_pte() for explanation.
> - */
> - entry = make_readable_device_private_entry(
> - swp_offset(entry));
> - newpte = swp_entry_to_pte(entry);
> - if (pte_swp_uffd_wp(oldpte))
> - newpte = pte_swp_mkuffd_wp(newpte);
> - } else if (softleaf_is_marker(entry)) {
> - /*
> - * Ignore error swap entries unconditionally,
> - * because any access should sigbus/sigsegv
> - * anyway.
> - */
> - if (softleaf_is_poison_marker(entry) ||
> - softleaf_is_guard_marker(entry))
> - continue;
> - /*
> - * If this is uffd-wp pte marker and we'd like
> - * to unprotect it, drop it; the next page
> - * fault will trigger without uffd trapping.
> - */
> - if (uffd_wp_resolve) {
> - pte_clear(vma->vm_mm, addr, pte);
> - pages++;
> - }
> - continue;
> - } else {
> - newpte = oldpte;
> - }
> -
> - if (uffd_wp)
> - newpte = pte_swp_mkuffd_wp(newpte);
> - else if (uffd_wp_resolve)
> - newpte = pte_swp_clear_uffd_wp(newpte);
> -
> - if (!pte_same(oldpte, newpte)) {
> - set_pte_at(vma->vm_mm, addr, pte, newpte);
> - pages++;
> - }
> + pages += change_pte_softleaf(vma, addr, pte, oldpte, cp_flags);
> }
> } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
> lazy_mmu_mode_disable();
> --
> 2.53.0
>