@@ -181,20 +181,6 @@ struct panthor_vm_op_ctx {
181181 u64 range ;
182182 } va ;
183183
184- /**
185- * @returned_vmas: List of panthor_vma objects returned after a VM operation.
186- *
187- * For unmap operations, this will contain all VMAs that were covered by the
188- * specified VA range.
189- *
190- * For map operations, this will contain all VMAs that previously mapped to
191- * the specified VA range.
192- *
193- * Those VMAs, and the resources they point to will be released as part of
194- * the op_ctx cleanup operation.
195- */
196- struct list_head returned_vmas ;
197-
198184 /** @map: Fields specific to a map operation. */
199185 struct {
200186 /** @map.vm_bo: Buffer object to map. */
@@ -1081,47 +1067,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
10811067 mutex_unlock (& vm -> mm_lock );
10821068}
10831069
1084- static void panthor_vm_bo_put (struct drm_gpuvm_bo * vm_bo )
1070+ static void panthor_vm_bo_free (struct drm_gpuvm_bo * vm_bo )
10851071{
10861072 struct panthor_gem_object * bo = to_panthor_bo (vm_bo -> obj );
1087- struct drm_gpuvm * vm = vm_bo -> vm ;
1088- bool unpin ;
1089-
1090- /* We must retain the GEM before calling drm_gpuvm_bo_put(),
1091- * otherwise the mutex might be destroyed while we hold it.
1092- * Same goes for the VM, since we take the VM resv lock.
1093- */
1094- drm_gem_object_get (& bo -> base .base );
1095- drm_gpuvm_get (vm );
1096-
1097- /* We take the resv lock to protect against concurrent accesses to the
1098- * gpuvm evicted/extobj lists that are modified in
1099- * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
 1100- * releases the last vm_bo reference.
1101- * We take the BO GPUVA list lock to protect the vm_bo removal from the
1102- * GEM vm_bo list.
1103- */
1104- dma_resv_lock (drm_gpuvm_resv (vm ), NULL );
1105- mutex_lock (& bo -> base .base .gpuva .lock );
1106- unpin = drm_gpuvm_bo_put (vm_bo );
1107- mutex_unlock (& bo -> base .base .gpuva .lock );
1108- dma_resv_unlock (drm_gpuvm_resv (vm ));
11091073
1110- /* If the vm_bo object was destroyed, release the pin reference that
 1111- * was held by this object.
1112- */
1113- if (unpin && !drm_gem_is_imported (& bo -> base .base ))
1074+ if (!drm_gem_is_imported (& bo -> base .base ))
11141075 drm_gem_shmem_unpin (& bo -> base );
1115-
1116- drm_gpuvm_put (vm );
1117- drm_gem_object_put (& bo -> base .base );
1076+ kfree (vm_bo );
11181077}
11191078
11201079static void panthor_vm_cleanup_op_ctx (struct panthor_vm_op_ctx * op_ctx ,
11211080 struct panthor_vm * vm )
11221081{
1123- struct panthor_vma * vma , * tmp_vma ;
1124-
11251082 u32 remaining_pt_count = op_ctx -> rsvd_page_tables .count -
11261083 op_ctx -> rsvd_page_tables .ptr ;
11271084
@@ -1134,16 +1091,12 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
11341091 kfree (op_ctx -> rsvd_page_tables .pages );
11351092
11361093 if (op_ctx -> map .vm_bo )
1137- panthor_vm_bo_put (op_ctx -> map .vm_bo );
1094+ drm_gpuvm_bo_put_deferred (op_ctx -> map .vm_bo );
11381095
11391096 for (u32 i = 0 ; i < ARRAY_SIZE (op_ctx -> preallocated_vmas ); i ++ )
11401097 kfree (op_ctx -> preallocated_vmas [i ]);
11411098
1142- list_for_each_entry_safe (vma , tmp_vma , & op_ctx -> returned_vmas , node ) {
1143- list_del (& vma -> node );
1144- panthor_vm_bo_put (vma -> base .vm_bo );
1145- kfree (vma );
1146- }
1099+ drm_gpuvm_bo_deferred_cleanup (& vm -> base );
11471100}
11481101
11491102static void
@@ -1250,7 +1203,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12501203 return - EINVAL ;
12511204
12521205 memset (op_ctx , 0 , sizeof (* op_ctx ));
1253- INIT_LIST_HEAD (& op_ctx -> returned_vmas );
12541206 op_ctx -> flags = flags ;
12551207 op_ctx -> va .range = size ;
12561208 op_ctx -> va .addr = va ;
@@ -1261,7 +1213,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12611213
12621214 if (!drm_gem_is_imported (& bo -> base .base )) {
12631215 /* Pre-reserve the BO pages, so the map operation doesn't have to
1264- * allocate.
1216+ * allocate. This pin is dropped in panthor_vm_bo_free(), so
1217+ * once we have successfully called drm_gpuvm_bo_create(),
1218+ * GPUVM will take care of dropping the pin for us.
12651219 */
12661220 ret = drm_gem_shmem_pin (& bo -> base );
12671221 if (ret )
@@ -1300,16 +1254,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
13001254 mutex_unlock (& bo -> base .base .gpuva .lock );
13011255 dma_resv_unlock (panthor_vm_resv (vm ));
13021256
 1303- /* If a vm_bo for this <VM,BO> combination exists, it already
1304- * retains a pin ref, and we can release the one we took earlier.
1305- *
1306- * If our pre-allocated vm_bo is picked, it now retains the pin ref,
1307- * which will be released in panthor_vm_bo_put().
1308- */
1309- if (preallocated_vm_bo != op_ctx -> map .vm_bo &&
1310- !drm_gem_is_imported (& bo -> base .base ))
1311- drm_gem_shmem_unpin (& bo -> base );
1312-
13131257 op_ctx -> map .bo_offset = offset ;
13141258
13151259 /* L1, L2 and L3 page tables.
@@ -1357,7 +1301,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
13571301 int ret ;
13581302
13591303 memset (op_ctx , 0 , sizeof (* op_ctx ));
1360- INIT_LIST_HEAD (& op_ctx -> returned_vmas );
13611304 op_ctx -> va .range = size ;
13621305 op_ctx -> va .addr = va ;
13631306 op_ctx -> flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP ;
@@ -1405,7 +1348,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx
14051348 struct panthor_vm * vm )
14061349{
14071350 memset (op_ctx , 0 , sizeof (* op_ctx ));
1408- INIT_LIST_HEAD (& op_ctx -> returned_vmas );
14091351 op_ctx -> flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY ;
14101352}
14111353
@@ -2051,26 +1993,13 @@ static void panthor_vma_link(struct panthor_vm *vm,
20511993
20521994 mutex_lock (& bo -> base .base .gpuva .lock );
20531995 drm_gpuva_link (& vma -> base , vm_bo );
2054- drm_WARN_ON (& vm -> ptdev -> base , drm_gpuvm_bo_put (vm_bo ));
20551996 mutex_unlock (& bo -> base .base .gpuva .lock );
20561997}
20571998
2058- static void panthor_vma_unlink (struct panthor_vm * vm ,
2059- struct panthor_vma * vma )
1999+ static void panthor_vma_unlink (struct panthor_vma * vma )
20602000{
2061- struct panthor_gem_object * bo = to_panthor_bo (vma -> base .gem .obj );
2062- struct drm_gpuvm_bo * vm_bo = drm_gpuvm_bo_get (vma -> base .vm_bo );
2063-
2064- mutex_lock (& bo -> base .base .gpuva .lock );
2065- drm_gpuva_unlink (& vma -> base );
2066- mutex_unlock (& bo -> base .base .gpuva .lock );
2067-
2068- /* drm_gpuva_unlink() release the vm_bo, but we manually retained it
2069- * when entering this function, so we can implement deferred VMA
2070- * destruction. Re-assign it here.
2071- */
2072- vma -> base .vm_bo = vm_bo ;
2073- list_add_tail (& vma -> node , & vm -> op_ctx -> returned_vmas );
2001+ drm_gpuva_unlink_defer (& vma -> base );
2002+ kfree (vma );
20742003}
20752004
20762005static void panthor_vma_init (struct panthor_vma * vma , u32 flags )
@@ -2104,12 +2033,12 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
21042033 return ret ;
21052034 }
21062035
2107- /* Ref owned by the mapping now, clear the obj field so we don't release the
2108- * pinning/obj ref behind GPUVA's back.
2109- */
21102036 drm_gpuva_map (& vm -> base , & vma -> base , & op -> map );
21112037 panthor_vma_link (vm , vma , op_ctx -> map .vm_bo );
2038+
2039+ drm_gpuvm_bo_put_deferred (op_ctx -> map .vm_bo );
21122040 op_ctx -> map .vm_bo = NULL ;
2041+
21132042 return 0 ;
21142043}
21152044
@@ -2148,16 +2077,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
21482077 * owned by the old mapping which will be released when this
21492078 * mapping is destroyed, we need to grab a ref here.
21502079 */
2151- panthor_vma_link (vm , prev_vma ,
2152- drm_gpuvm_bo_get (op -> remap .unmap -> va -> vm_bo ));
2080+ panthor_vma_link (vm , prev_vma , op -> remap .unmap -> va -> vm_bo );
21532081 }
21542082
21552083 if (next_vma ) {
2156- panthor_vma_link (vm , next_vma ,
2157- drm_gpuvm_bo_get (op -> remap .unmap -> va -> vm_bo ));
2084+ panthor_vma_link (vm , next_vma , op -> remap .unmap -> va -> vm_bo );
21582085 }
21592086
2160- panthor_vma_unlink (vm , unmap_vma );
2087+ panthor_vma_unlink (unmap_vma );
21612088 return 0 ;
21622089}
21632090
@@ -2174,12 +2101,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
21742101 return ret ;
21752102
21762103 drm_gpuva_unmap (& op -> unmap );
2177- panthor_vma_unlink (vm , unmap_vma );
2104+ panthor_vma_unlink (unmap_vma );
21782105 return 0 ;
21792106}
21802107
21812108static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
21822109 .vm_free = panthor_vm_free ,
2110+ .vm_bo_free = panthor_vm_bo_free ,
21832111 .sm_step_map = panthor_gpuva_sm_step_map ,
21842112 .sm_step_remap = panthor_gpuva_sm_step_remap ,
21852113 .sm_step_unmap = panthor_gpuva_sm_step_unmap ,
0 commit comments