Re: linux-next: manual merge of the drm tree with the drm-misc-fixes tree
From: Boris Brezillon
Date: Fri Mar 20 2026 - 11:40:08 EST
Hello Mark,
On Fri, 20 Mar 2026 14:17:46 +0000
Mark Brown <broonie@xxxxxxxxxx> wrote:
> Hi all,
>
> Today's linux-next merge of the drm tree got a conflict in:
>
> drivers/gpu/drm/drm_gem_shmem_helper.c
>
> between commit:
>
> fc3bbf34e643f ("drm/shmem-helper: Fix huge page mapping in fault handler")
>
> from the drm-misc-fixes tree and commits:
>
> 5cf8de6cd1620 ("drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd()")
> 06f3662cb3ba9 ("drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()")
>
> from the drm tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
I have a slightly different conflict resolution (it's the one we currently
have in drm-tip[1]).
Regards,
Boris
[1] https://gitlab.freedesktop.org/drm/tip
--->8---
diff --cc drivers/gpu/drm/drm_gem_shmem_helper.c
index c549293b5bb6,4500deef4127..2062ca607833
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -574,33 -574,39 +578,38 @@@ static vm_fault_t drm_gem_shmem_any_fau
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
- vm_fault_t ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
struct page **pages = shmem->pages;
- pgoff_t page_offset;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+ struct page *page;
+ struct folio *folio;
unsigned long pfn;
+ if (order && order != PMD_ORDER)
+ return VM_FAULT_FALLBACK;
+
- /* Offset to faulty address in the VMA. */
- page_offset = vmf->pgoff - vma->vm_pgoff;
+ dma_resv_lock(obj->resv, NULL);
- dma_resv_lock(shmem->base.resv, NULL);
-
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
- shmem->madv < 0) {
- ret = VM_FAULT_SIGBUS;
+ if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
+ shmem->madv < 0)
goto out;
- }
- pfn = page_to_pfn(pages[page_offset]);
+ page = pages[page_offset];
+ if (drm_WARN_ON_ONCE(dev, !page))
+ goto out;
+ folio = page_folio(page);
+
+ pfn = page_to_pfn(page);
+
- if (folio_test_pmd_mappable(folio))
- ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
- if (ret != VM_FAULT_NOPAGE)
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
-
+ ret = try_insert_pfn(vmf, order, pfn);
+ if (ret == VM_FAULT_NOPAGE)
+ folio_mark_accessed(folio);
- out:
- dma_resv_unlock(shmem->base.resv);
+ out:
+ dma_resv_unlock(obj->resv);
return ret;
}
@@@ -644,13 -645,29 +653,32 @@@ static void drm_gem_shmem_vm_close(stru
drm_gem_vm_close(vma);
}
+ static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return VM_FAULT_SIGBUS;
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+ return 0;
+ }
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ .huge_fault = drm_gem_shmem_any_fault,
+#endif
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);