@@ -554,6 +554,21 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
554554}
555555EXPORT_SYMBOL_GPL (drm_gem_shmem_dumb_create );
556556
/*
 * NOTE(review): this file is a commit-page extraction of a unified diff;
 * the leading "NNN+/-" tokens are diff gutter numbers, not C code.
 *
 * drm_gem_shmem_record_mkwrite() - bookkeeping for a CPU write fault on a
 * shmem-backed GEM object.  Bounds-checks the faulting page against the
 * object size (drm_WARN_ON and bail if out of range or pages are gone),
 * updates the backing file's timestamps, and marks the faulted folio
 * dirty so the write is not lost by writeback/reclaim.
 * Returns void: callers cannot observe the WARN path failing.
 */
557+ static void drm_gem_shmem_record_mkwrite (struct vm_fault * vmf )
558+ {
559+ struct vm_area_struct * vma = vmf -> vma ;
560+ struct drm_gem_object * obj = vma -> vm_private_data ;
561+ struct drm_gem_shmem_object * shmem = to_drm_gem_shmem_obj (obj );
562+ loff_t num_pages = obj -> size >> PAGE_SHIFT ;
563+ pgoff_t page_offset = vmf -> pgoff - vma -> vm_pgoff ; /* page offset within VMA */
564+ 
565+ if (drm_WARN_ON (obj -> dev , !shmem -> pages || page_offset >= num_pages ))
566+ return ;
567+ 
568+ file_update_time (vma -> vm_file );
569+ folio_mark_dirty (page_folio (shmem -> pages [page_offset ]));
570+ }
571+
/*
 * Diff fragment of try_insert_pfn(); lines 559-565 of the new file are
 * elided by the second hunk header, so this view is incomplete — do not
 * edit the logic from here.
 * The change: the PMD mapping path now forwards FAULT_FLAG_WRITE to
 * vmf_insert_pfn_pmd() and, on VM_FAULT_NOPAGE for a write fault,
 * records the mkwrite bookkeeping immediately, because (per the in-diff
 * comment) PMD write upgrades re-enter via .huge_fault() rather than
 * .pfn_mkwrite().
 */
557572static vm_fault_t try_insert_pfn (struct vm_fault * vmf , unsigned int order ,
558573 unsigned long pfn )
559574{
@@ -566,8 +581,23 @@ static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
566581
567582 if (aligned &&
568583 folio_test_pmd_mappable (page_folio (pfn_to_page (pfn )))) {
584+ vm_fault_t ret ;
585+
569586 pfn &= PMD_MASK >> PAGE_SHIFT ;
570- return vmf_insert_pfn_pmd (vmf , pfn , false);
587+
588+ /* Unlike PTEs which are automatically upgraded to
589+ * writeable entries, the PMD upgrades go through
590+ * .huge_fault(). Make sure we pass the "write" info
591+ * along in that case.
592+ * This also means we have to record the write fault
593+ * here, instead of in .pfn_mkwrite().
594+ */
595+ ret = vmf_insert_pfn_pmd (vmf , pfn ,
596+ vmf -> flags & FAULT_FLAG_WRITE );
597+ if (ret == VM_FAULT_NOPAGE && (vmf -> flags & FAULT_FLAG_WRITE ))
598+ drm_gem_shmem_record_mkwrite (vmf );
599+
600+ return ret ;
571601 }
572602#endif
573603}
@@ -655,19 +685,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
655685
/*
 * .pfn_mkwrite handler, refactored by this patch to delegate the
 * dirty-tracking bookkeeping to drm_gem_shmem_record_mkwrite().
 * NOTE(review): the removed inline code returned VM_FAULT_SIGBUS when the
 * drm_WARN_ON() check fired; the helper returns void, so this handler now
 * returns 0 unconditionally even on that WARN path — confirm that
 * dropping the SIGBUS error return is intentional.
 */
656686static vm_fault_t drm_gem_shmem_pfn_mkwrite (struct vm_fault * vmf )
657687{
658- struct vm_area_struct * vma = vmf -> vma ;
659- struct drm_gem_object * obj = vma -> vm_private_data ;
660- struct drm_gem_shmem_object * shmem = to_drm_gem_shmem_obj (obj );
661- loff_t num_pages = obj -> size >> PAGE_SHIFT ;
662- pgoff_t page_offset = vmf -> pgoff - vma -> vm_pgoff ; /* page offset within VMA */
663-
664- if (drm_WARN_ON (obj -> dev , !shmem -> pages || page_offset >= num_pages ))
665- return VM_FAULT_SIGBUS ;
666-
667- file_update_time (vma -> vm_file );
668-
669- folio_mark_dirty (page_folio (shmem -> pages [page_offset ]));
670-
688+ drm_gem_shmem_record_mkwrite (vmf );
671689 return 0 ;
672690}
673691
0 commit comments