@@ -550,27 +550,27 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
550550}
551551EXPORT_SYMBOL_GPL (drm_gem_shmem_dumb_create );
552552
/*
 * try_insert_pfn - map the faulting address to @pfn at the requested order
 * @vmf:   fault descriptor (supplies the VMA and the faulting address)
 * @order: requested mapping order; 0 = single PTE, PMD_ORDER = huge PMD
 * @pfn:   page frame number backing the faulting offset
 *
 * Returns the vm_fault_t result of the insertion, or VM_FAULT_FALLBACK when
 * the requested order cannot be satisfied here (unsupported order, arch
 * without PMD PFN-map support, or misaligned address) so the core MM can
 * retry the fault at a smaller order.
 */
static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
				 unsigned long pfn)
{
	if (!order) {
		/* Order 0: plain single-page PTE mapping. */
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	} else if (order == PMD_ORDER) {
		unsigned long paddr = pfn << PAGE_SHIFT;
		/*
		 * A PMD entry only works when the virtual address and the
		 * physical address share the same offset within a PMD-sized
		 * region.
		 */
		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

		if (aligned &&
		    folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
			/* Round the pfn down to the PMD boundary. */
			pfn &= PMD_MASK >> PAGE_SHIFT;
			return vmf_insert_pfn_pmd(vmf, pfn, false);
		}
#endif
	}
	return VM_FAULT_FALLBACK;
}
572572
573- static vm_fault_t drm_gem_shmem_fault (struct vm_fault * vmf )
573+ static vm_fault_t drm_gem_shmem_any_fault (struct vm_fault * vmf , unsigned int order )
574574{
575575 struct vm_area_struct * vma = vmf -> vma ;
576576 struct drm_gem_object * obj = vma -> vm_private_data ;
@@ -581,6 +581,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
581581 pgoff_t page_offset ;
582582 unsigned long pfn ;
583583
584+ if (order && order != PMD_ORDER )
585+ return VM_FAULT_FALLBACK ;
586+
584587 /* Offset to faulty address in the VMA. */
585588 page_offset = vmf -> pgoff - vma -> vm_pgoff ;
586589
@@ -593,20 +596,20 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
593596 goto out ;
594597 }
595598
596- if (drm_gem_shmem_try_map_pmd (vmf , vmf -> address , pages [page_offset ])) {
597- ret = VM_FAULT_NOPAGE ;
598- goto out ;
599- }
600-
601599 pfn = page_to_pfn (pages [page_offset ]);
602- ret = vmf_insert_pfn ( vma , vmf -> address , pfn );
600+ ret = try_insert_pfn ( vmf , order , pfn );
603601
604602 out :
605603 dma_resv_unlock (shmem -> base .resv );
606604
607605 return ret ;
608606}
609607
/* Regular (order-0) .fault handler: delegate to the common fault path. */
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	return drm_gem_shmem_any_fault(vmf, 0);
}
612+
610613static void drm_gem_shmem_vm_open (struct vm_area_struct * vma )
611614{
612615 struct drm_gem_object * obj = vma -> vm_private_data ;
@@ -643,6 +646,9 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
643646
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	/* Serve huge (e.g. PMD-order) faults directly when the arch allows. */
	.huge_fault = drm_gem_shmem_any_fault,
#endif
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
0 commit comments