Re: [PATCH 10/14] mm: prepare to move subsection_map_init() to mm/sparse-vmemmap.c
From: Lorenzo Stoakes (Oracle)
Date: Tue Mar 17 2026 - 15:56:06 EST
On Tue, Mar 17, 2026 at 05:56:48PM +0100, David Hildenbrand (Arm) wrote:
> We want to move subsection_map_init() to mm/sparse-vmemmap.c.
>
> To prepare for getting rid of subsection_map_init() in mm/sparse.c
> completely, use a static inline function for !CONFIG_SPARSEMEM_VMEMMAP.
>
> While at it, move the declaration to internal.h and rename it to
> "sparse_init_subsection_map()".
Why not init_sparse_subsection_map()??
Or sparse_init_map_subsection()????
Or <all other permutations>
Joking — that's fine ;)
>
> Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
You've initialised the sparse subsection of my heart, so:
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
> ---
> include/linux/mmzone.h | 3 ---
> mm/internal.h | 12 ++++++++++++
> mm/mm_init.c | 2 +-
> mm/sparse.c | 6 +-----
> 4 files changed, 14 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 7bd0134c241c..b694c69dee04 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -2002,8 +2002,6 @@ struct mem_section_usage {
> unsigned long pageblock_flags[0];
> };
>
> -void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
> -
> struct page;
> struct page_ext;
> struct mem_section {
> @@ -2396,7 +2394,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
> #define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
> #define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
> #define pfn_in_present_section pfn_valid
> -#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
> #endif /* CONFIG_SPARSEMEM */
>
> /*
> diff --git a/mm/internal.h b/mm/internal.h
> index f98f4746ac41..5f5c45d80aca 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -960,12 +960,24 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
> unsigned long, enum meminit_context, struct vmem_altmap *, int,
> bool);
>
> +/*
> + * mm/sparse.c
> + */
> #ifdef CONFIG_SPARSEMEM
> void sparse_init(void);
> #else
> static inline void sparse_init(void) {}
> #endif /* CONFIG_SPARSEMEM */
>
> +#ifdef CONFIG_SPARSEMEM_VMEMMAP
> +void sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages);
> +#else
> +static inline void sparse_init_subsection_map(unsigned long pfn,
> + unsigned long nr_pages)
> +{
> +}
> +#endif /* CONFIG_SPARSEMEM_VMEMMAP */
> +
> #if defined CONFIG_COMPACTION || defined CONFIG_CMA
>
> /*
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 969048f9b320..3c5f18537cd1 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1898,7 +1898,7 @@ static void __init free_area_init(void)
> pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
> (u64)start_pfn << PAGE_SHIFT,
> ((u64)end_pfn << PAGE_SHIFT) - 1);
> - subsection_map_init(start_pfn, end_pfn - start_pfn);
> + sparse_init_subsection_map(start_pfn, end_pfn - start_pfn);
> }
>
> /* Initialise every node */
> diff --git a/mm/sparse.c b/mm/sparse.c
> index b57c81e99340..7b0bfea73a9b 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -185,7 +185,7 @@ static void subsection_mask_set(unsigned long *map, unsigned long pfn,
> bitmap_set(map, idx, end - idx + 1);
> }
>
> -void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
> +void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
> {
> int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
> unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
> @@ -207,10 +207,6 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
> nr_pages -= pfns;
> }
> }
> -#else
> -void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
> -{
> -}
> #endif
>
> /* Record a memory area against a node. */
> --
> 2.43.0
>