Re: [PATCH v3] drm/shmem-helper: Fix huge page mapping in fault handler
From: Thomas Zimmermann
Date: Mon Mar 16 2026 - 06:15:21 EST
Hi
On 16.03.26 at 10:50, Boris Brezillon wrote:
Avoid this race by adding a huge_fault callback to drm_gem_shmem_vm_ops so that
PMD-sized mappings are handled through the appropriate huge page fault path.
This patch will conflict heavily with the folio-tracking patches that have been merged into upstream DRM. How do we deal with that?
Best regards
Thomas
Fixes: 211b9a39f261 ("drm/shmem-helper: Map huge pages in fault handler")
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@xxxxxxxxx>
---
Changes in v3:
- Pass a try_pmd boolean parameter to drm_gem_shmem_any_fault
- Compile drm_gem_shmem_huge_fault only if CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
is defined to avoid a build warning
Changes in v2: https://lore.kernel.org/dri-devel/20260313141719.3949700-1-pedrodemargomes@xxxxxxxxx/
- Keep the #ifdef unindented
- Create drm_gem_shmem_any_fault to handle faults of any order and use
drm_gem_shmem_[huge_]fault() as wrappers
v1: https://lore.kernel.org/all/20260312155027.1682606-1-pedrodemargomes@xxxxxxxxx/
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 63 ++++++++++++++------------
1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7b5a49935ae4..9428c3ab7283 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -550,36 +550,18 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
- struct page *page)
-{
-#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
- unsigned long pfn = page_to_pfn(page);
- unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
-
- if (aligned &&
- pmd_none(*vmf->pmd) &&
- folio_test_pmd_mappable(page_folio(page))) {
- pfn &= PMD_MASK >> PAGE_SHIFT;
- if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
- return true;
- }
-#endif
-
- return false;
-}
-
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, bool try_pmd)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
- vm_fault_t ret;
+ vm_fault_t ret = VM_FAULT_FALLBACK;
struct page **pages = shmem->pages;
pgoff_t page_offset;
unsigned long pfn;
+ unsigned long paddr;
+ bool aligned;
Those two variables can be moved to the `if (try_pmd)` branch.
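Untested sketch of what I mean, with paddr and aligned scoped to the only branch that uses them (same logic as in the patch, just rescoped):

	if (try_pmd) {
		/* only needed when we attempt a PMD-sized mapping */
		unsigned long paddr = pfn << PAGE_SHIFT;
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pages[page_offset]))) {
			pfn &= PMD_MASK >> PAGE_SHIFT;
			ret = vmf_insert_pfn_pmd(vmf, pfn, false);
		}
	} else {
		ret = vmf_insert_pfn(vma, vmf->address, pfn);
	}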
/* Offset to faulty address in the VMA. */
page_offset = vmf->pgoff - vma->vm_pgoff;
@@ -593,13 +575,19 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
goto out;
}
- if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
pfn = page_to_pfn(pages[page_offset]);
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ if (try_pmd) {
+ paddr = pfn << PAGE_SHIFT;
+ aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
+
+ if (aligned &&
+ folio_test_pmd_mappable(page_folio(pages[page_offset]))) {
+ pfn &= PMD_MASK >> PAGE_SHIFT;
+ ret = vmf_insert_pfn_pmd(vmf, pfn, false);
+ }
+ } else {
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ }
Hm, if we get rid of drm_gem_shmem_try_map_pmd(), we probably need something like
	if (!try_pmd) {
		ret = vmf_insert_pfn(vma, vmf->address, pfn);
	} else {
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
		unsigned long paddr = pfn << PAGE_SHIFT;
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pages[page_offset]))) {
			pfn &= PMD_MASK >> PAGE_SHIFT;
			ret = vmf_insert_pfn_pmd(vmf, pfn, false);
		}
#else
		ret = VM_FAULT_FALLBACK;
#endif
	}
to make sure we're not calling helpers that might not exist in some configs (the compiler might optimize the try_pmd==true branch out already, but better safe than sorry).
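An IS_ENABLED()-based variant would read nicer, but it only helps if vmf_insert_pfn_pmd() has a declaration in every config, which is exactly what's in doubt here; sketch for comparison only, with ret pre-initialized to VM_FAULT_FALLBACK as in the patch:

	if (!try_pmd) {
		ret = vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (IS_ENABLED(CONFIG_ARCH_SUPPORTS_PMD_PFNMAP)) {
		/*
		 * Compile-time constant: the branch is dropped when the
		 * option is off, but vmf_insert_pfn_pmd() still has to be
		 * declared in that config for this to build.
		 */
		unsigned long paddr = pfn << PAGE_SHIFT;
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pages[page_offset]))) {
			pfn &= PMD_MASK >> PAGE_SHIFT;
			ret = vmf_insert_pfn_pmd(vmf, pfn, false);
		}
	}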
out:
dma_resv_unlock(shmem->base.resv);
@@ -607,6 +595,22 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
return ret;
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
+ unsigned int order)
+{
+ if (order != PMD_ORDER)
+ return VM_FAULT_FALLBACK;
+
+ return drm_gem_shmem_any_fault(vmf, true);
+}
+#endif
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+ return drm_gem_shmem_any_fault(vmf, false);
+}
+
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
@@ -643,6 +647,9 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ .huge_fault = drm_gem_shmem_huge_fault,
+#endif
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
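For reference, the PMD_ORDER check in drm_gem_shmem_huge_fault() can simply return VM_FAULT_FALLBACK because of how the core mm drives these callbacks; roughly like this (simplified sketch, not verbatim mm/memory.c), so a fallback from ->huge_fault ends up in the PTE-sized ->fault path anyway:

	/* simplified: how the core mm dispatches a PMD-level fault */
	if (pmd_none(*vmf->pmd) && vma->vm_ops->huge_fault) {
		ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	}
	/* fall back to the PTE-sized handler */
	return vma->vm_ops->fault(vmf);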
--
2.47.3
--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Frankenstr. 146, 90461 Nürnberg, Germany, www.suse.com
GF: Jochen Jaser, Andrew McDonald, Werner Knoblich, (HRB 36809, AG Nürnberg)