432432 * DRM GPUVM also does not take care of the locking of the backing
433433 * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
434434 * itself; drivers are responsible to enforce mutual exclusion using either the
435- * GEMs dma_resv lock or alternatively a driver specific external lock. For the
436- * latter see also drm_gem_gpuva_set_lock().
435+ * GEMs dma_resv lock or the GEMs gpuva.lock mutex.
437436 *
438437 * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
439438 * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -1518,7 +1517,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
15181517 drm_gpuvm_bo_list_del (vm_bo , extobj , lock );
15191518 drm_gpuvm_bo_list_del (vm_bo , evict , lock );
15201519
1521- drm_gem_gpuva_assert_lock_held (obj );
1520+ drm_gem_gpuva_assert_lock_held (gpuvm , obj );
15221521 list_del (& vm_bo -> list .entry .gem );
15231522
15241523 if (ops && ops -> vm_bo_free )
@@ -1539,7 +1538,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
15391538 * If the reference count drops to zero, the &gpuvm_bo is destroyed, which
15401539 * includes removing it from the GEMs gpuva list. Hence, if a call to this
15411540 * function can potentially let the reference count drop to zero the caller must
1542- * hold the dma-resv or driver specific GEM gpuva lock.
1541+ * hold the lock that the GEM uses for its gpuva list (either the GEM's
1542+ * dma-resv or gpuva.lock mutex).
15431543 *
15441544 * This function may only be called from non-atomic context.
15451545 *
@@ -1563,7 +1563,7 @@ __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
15631563{
15641564 struct drm_gpuvm_bo * vm_bo ;
15651565
1566- drm_gem_gpuva_assert_lock_held (obj );
1566+ drm_gem_gpuva_assert_lock_held (gpuvm , obj );
15671567 drm_gem_for_each_gpuvm_bo (vm_bo , obj )
15681568 if (vm_bo -> vm == gpuvm )
15691569 return vm_bo ;
@@ -1622,7 +1622,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
16221622 if (!vm_bo )
16231623 return ERR_PTR (- ENOMEM );
16241624
1625- drm_gem_gpuva_assert_lock_held (obj );
1625+ drm_gem_gpuva_assert_lock_held (gpuvm , obj );
16261626 list_add_tail (& vm_bo -> list .entry .gem , & obj -> gpuva .list );
16271627
16281628 return vm_bo ;
@@ -1658,7 +1658,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
16581658 return vm_bo ;
16591659 }
16601660
1661- drm_gem_gpuva_assert_lock_held (obj );
1661+ drm_gem_gpuva_assert_lock_held (gpuvm , obj );
16621662 list_add_tail (& __vm_bo -> list .entry .gem , & obj -> gpuva .list );
16631663
16641664 return __vm_bo ;
@@ -1830,8 +1830,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
18301830 * reference of the latter is taken.
18311831 *
18321832 * This function expects the caller to protect the GEM's GPUVA list against
1833- * concurrent access using either the GEMs dma_resv lock or a driver specific
1834- * lock set through drm_gem_gpuva_set_lock().
1833+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
18351834 */
18361835void
18371836drm_gpuva_link (struct drm_gpuva * va , struct drm_gpuvm_bo * vm_bo )
@@ -1846,7 +1845,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
18461845
18471846 va -> vm_bo = drm_gpuvm_bo_get (vm_bo );
18481847
1849- drm_gem_gpuva_assert_lock_held (obj );
1848+ drm_gem_gpuva_assert_lock_held (gpuvm , obj );
18501849 list_add_tail (& va -> gem .entry , & vm_bo -> list .gpuva );
18511850}
18521851EXPORT_SYMBOL_GPL (drm_gpuva_link );
@@ -1866,8 +1865,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
18661865 * the latter is dropped.
18671866 *
18681867 * This function expects the caller to protect the GEM's GPUVA list against
1869- * concurrent access using either the GEMs dma_resv lock or a driver specific
1870- * lock set through drm_gem_gpuva_set_lock().
1868+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
18711869 */
18721870void
18731871drm_gpuva_unlink (struct drm_gpuva * va )
@@ -1878,7 +1876,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
18781876 if (unlikely (!obj ))
18791877 return ;
18801878
1881- drm_gem_gpuva_assert_lock_held (obj );
1879+ drm_gem_gpuva_assert_lock_held (va -> vm , obj );
18821880 list_del_init (& va -> gem .entry );
18831881
18841882 va -> vm_bo = NULL ;
@@ -2888,8 +2886,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap);
28882886 * After the caller finished processing the returned &drm_gpuva_ops, they must
28892887 * be freed with &drm_gpuva_ops_free.
28902888 *
2891- * It is the callers responsibility to protect the GEMs GPUVA list against
2892- * concurrent access using the GEMs dma_resv lock.
2889+ * This function expects the caller to protect the GEM's GPUVA list against
2890+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
28932891 *
28942892 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
28952893 */
@@ -2901,7 +2899,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
29012899 struct drm_gpuva * va ;
29022900 int ret ;
29032901
2904- drm_gem_gpuva_assert_lock_held (vm_bo -> obj );
2902+ drm_gem_gpuva_assert_lock_held (vm_bo -> vm , vm_bo -> obj );
29052903
29062904 ops = kzalloc (sizeof (* ops ), GFP_KERNEL );
29072905 if (!ops )
0 commit comments