Re: [PATCH mm-unstable v15 12/13] mm/khugepaged: run khugepaged for all orders
From: Nico Pache
Date: Wed Mar 18 2026 - 15:02:57 EST
On 3/17/26 4:58 AM, Lorenzo Stoakes (Oracle) wrote:
> On Wed, Feb 25, 2026 at 08:26:50PM -0700, Nico Pache wrote:
>> From: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
>>
>> If any order (m)THP is enabled we should allow running khugepaged to
>> attempt scanning and collapsing mTHPs. In order for khugepaged to operate
>> when only mTHP sizes are specified in sysfs, we must modify the predicate
>> function that determines whether it ought to run to do so.
>>
>> This function is currently called hugepage_pmd_enabled(), this patch
>> renames it to hugepage_enabled() and updates the logic to check to
>> determine whether any valid orders may exist which would justify
>> khugepaged running.
>>
>> We must also update collapse_allowable_orders() to check all orders if
>> the vma is anonymous and the collapse is khugepaged.
>>
>> After this patch khugepaged mTHP collapse is fully enabled.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
>> Signed-off-by: Nico Pache <npache@xxxxxxxxxx>
>
> This looks good to me, so:
>
> Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
Thanks!
>
>> ---
>> mm/khugepaged.c | 30 ++++++++++++++++++------------
>> 1 file changed, 18 insertions(+), 12 deletions(-)
>>
>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>> index 388d3f2537e2..e8bfcc1d0c9a 100644
>> --- a/mm/khugepaged.c
>> +++ b/mm/khugepaged.c
>> @@ -434,23 +434,23 @@ static inline int collapse_test_exit_or_disable(struct mm_struct *mm)
>> mm_flags_test(MMF_DISABLE_THP_COMPLETELY, mm);
>> }
>>
>> -static bool hugepage_pmd_enabled(void)
>> +static bool hugepage_enabled(void)
>> {
>> /*
>> * We cover the anon, shmem and the file-backed case here; file-backed
>> * hugepages, when configured in, are determined by the global control.
>> - * Anon pmd-sized hugepages are determined by the pmd-size control.
>> + * Anon hugepages are determined by its per-size mTHP control.
>
> Well also PMD right? I mean this terminology sucks because in a sense mTHP
> includes PMD... :)
Yeah, it's kinda hard with our verbiage being so broad and overlapping sometimes.
>
>> * Shmem pmd-sized hugepages are also determined by its pmd-size control,
>> * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
>> */
>> if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
>> hugepage_global_enabled())
>> return true;
>> - if (test_bit(PMD_ORDER, &huge_anon_orders_always))
>> + if (READ_ONCE(huge_anon_orders_always))
>> return true;
>> - if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
>> + if (READ_ONCE(huge_anon_orders_madvise))
>> return true;
>> - if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
>> + if (READ_ONCE(huge_anon_orders_inherit) &&
>> hugepage_global_enabled())
>> return true;
>> if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
>> @@ -521,8 +521,14 @@ static unsigned int collapse_max_ptes_none(unsigned int order)
>> static unsigned long collapse_allowable_orders(struct vm_area_struct *vma,
>> vm_flags_t vm_flags, bool is_khugepaged)
>> {
>> + unsigned long orders;
>> enum tva_type tva_flags = is_khugepaged ? TVA_KHUGEPAGED : TVA_FORCED_COLLAPSE;
>> - unsigned long orders = BIT(HPAGE_PMD_ORDER);
>> +
>> + /* If khugepaged is scanning an anonymous vma, allow mTHP collapse */
>> + if (is_khugepaged && vma_is_anonymous(vma))
>> + orders = THP_ORDERS_ALL_ANON;
>> + else
>> + orders = BIT(HPAGE_PMD_ORDER);
>>
>> return thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
>> }
>> @@ -531,7 +537,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>> vm_flags_t vm_flags)
>> {
>> if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
>> - hugepage_pmd_enabled()) {
>> + hugepage_enabled()) {
>> if (collapse_allowable_orders(vma, vm_flags, /*is_khugepaged=*/true))
>> __khugepaged_enter(vma->vm_mm);
>> }
>> @@ -2929,7 +2935,7 @@ static unsigned int collapse_scan_mm_slot(unsigned int pages, enum scan_result *
>>
>> static int khugepaged_has_work(void)
>> {
>> - return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
>> + return !list_empty(&khugepaged_scan.mm_head) && hugepage_enabled();
>> }
>>
>> static int khugepaged_wait_event(void)
>> @@ -3002,7 +3008,7 @@ static void khugepaged_wait_work(void)
>> return;
>> }
>>
>> - if (hugepage_pmd_enabled())
>> + if (hugepage_enabled())
>> wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
>> }
>>
>> @@ -3033,7 +3039,7 @@ static void set_recommended_min_free_kbytes(void)
>> int nr_zones = 0;
>> unsigned long recommended_min;
>>
>> - if (!hugepage_pmd_enabled()) {
>> + if (!hugepage_enabled()) {
>> calculate_min_free_kbytes();
>> goto update_wmarks;
>> }
>> @@ -3083,7 +3089,7 @@ int start_stop_khugepaged(void)
>> int err = 0;
>>
>> mutex_lock(&khugepaged_mutex);
>> - if (hugepage_pmd_enabled()) {
>> + if (hugepage_enabled()) {
>> if (!khugepaged_thread)
>> khugepaged_thread = kthread_run(khugepaged, NULL,
>> "khugepaged");
>> @@ -3109,7 +3115,7 @@ int start_stop_khugepaged(void)
>> void khugepaged_min_free_kbytes_update(void)
>> {
>> mutex_lock(&khugepaged_mutex);
>> - if (hugepage_pmd_enabled() && khugepaged_thread)
>> + if (hugepage_enabled() && khugepaged_thread)
>> set_recommended_min_free_kbytes();
>> mutex_unlock(&khugepaged_mutex);
>> }
>> --
>> 2.53.0
>>
>