linux-next: manual merge of the drm tree with the drm-misc-fixes tree

From: Mark Brown

Date: Fri Mar 20 2026 - 10:18:58 EST


Hi all,

Today's linux-next merge of the drm tree got a conflict in:

drivers/gpu/drm/drm_gem_shmem_helper.c

between commit:

fc3bbf34e643f ("drm/shmem-helper: Fix huge page mapping in fault handler")

from the drm-misc-fixes tree and commits:

5cf8de6cd1620 ("drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd()")
06f3662cb3ba9 ("drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()")

from the drm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

diff --cc drivers/gpu/drm/drm_gem_shmem_helper.c
index c549293b5bb61,4500deef41278..0000000000000
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -550,27 -554,23 +554,23 @@@ int drm_gem_shmem_dumb_create(struct dr
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

- static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
- unsigned long pfn)
+ static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
{
- if (!order) {
- return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
- } else if (order == PMD_ORDER) {
- unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
+ unsigned long paddr = pfn << PAGE_SHIFT;
+ bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

- if (aligned &&
- folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
- pfn &= PMD_MASK >> PAGE_SHIFT;
- return vmf_insert_pfn_pmd(vmf, pfn, false);
- }
- #endif
+ if (aligned && pmd_none(*vmf->pmd)) {
+ /* Read-only mapping; split upon write fault */
+ pfn &= PMD_MASK >> PAGE_SHIFT;
+ return vmf_insert_pfn_pmd(vmf, pfn, false);
}
- return VM_FAULT_FALLBACK;
+ #endif
+
+ return 0;
}

-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@@ -644,13 -645,29 +650,32 @@@ static void drm_gem_shmem_vm_close(stru
drm_gem_vm_close(vma);
}

+ static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return VM_FAULT_SIGBUS;
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+ return 0;
+ }
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ .huge_fault = drm_gem_shmem_any_fault,
+#endif
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

Attachment: signature.asc
Description: PGP signature