Skip to content

Commit 9a2e730

Browse files
Darksonn authored and herrnst committed
panthor: use drm_gpuva_unlink_defer()
Instead of manually deferring cleanup of vm_bos, use the new GPUVM infrastructure for doing so. To avoid manual management of vm_bo refcounts, the panthor_vma_link() and panthor_vma_unlink() methods are changed to get and put a vm_bo refcount on the vm_bo. This simplifies the code a lot. I preserved the behavior where panthor_gpuva_sm_step_map() drops the refcount right away rather than letting panthor_vm_cleanup_op_ctx() do it later. Reviewed-by: Boris Brezillon <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alice Ryhl <[email protected]>
1 parent ec90d02 commit 9a2e730

1 file changed

Lines changed: 19 additions & 91 deletions

File tree

drivers/gpu/drm/panthor/panthor_mmu.c

Lines changed: 19 additions & 91 deletions
Original file line numberDiff line numberDiff line change
@@ -181,20 +181,6 @@ struct panthor_vm_op_ctx {
181181
u64 range;
182182
} va;
183183

184-
/**
185-
* @returned_vmas: List of panthor_vma objects returned after a VM operation.
186-
*
187-
* For unmap operations, this will contain all VMAs that were covered by the
188-
* specified VA range.
189-
*
190-
* For map operations, this will contain all VMAs that previously mapped to
191-
* the specified VA range.
192-
*
193-
* Those VMAs, and the resources they point to will be released as part of
194-
* the op_ctx cleanup operation.
195-
*/
196-
struct list_head returned_vmas;
197-
198184
/** @map: Fields specific to a map operation. */
199185
struct {
200186
/** @map.vm_bo: Buffer object to map. */
@@ -1078,47 +1064,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
10781064
mutex_unlock(&vm->mm_lock);
10791065
}
10801066

1081-
static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
1067+
static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
10821068
{
10831069
struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
1084-
struct drm_gpuvm *vm = vm_bo->vm;
1085-
bool unpin;
1086-
1087-
/* We must retain the GEM before calling drm_gpuvm_bo_put(),
1088-
* otherwise the mutex might be destroyed while we hold it.
1089-
* Same goes for the VM, since we take the VM resv lock.
1090-
*/
1091-
drm_gem_object_get(&bo->base.base);
1092-
drm_gpuvm_get(vm);
1093-
1094-
/* We take the resv lock to protect against concurrent accesses to the
1095-
* gpuvm evicted/extobj lists that are modified in
1096-
* drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
1097-
* releases sthe last vm_bo reference.
1098-
* We take the BO GPUVA list lock to protect the vm_bo removal from the
1099-
* GEM vm_bo list.
1100-
*/
1101-
dma_resv_lock(drm_gpuvm_resv(vm), NULL);
1102-
mutex_lock(&bo->base.base.gpuva.lock);
1103-
unpin = drm_gpuvm_bo_put(vm_bo);
1104-
mutex_unlock(&bo->base.base.gpuva.lock);
1105-
dma_resv_unlock(drm_gpuvm_resv(vm));
11061070

1107-
/* If the vm_bo object was destroyed, release the pin reference that
1108-
* was hold by this object.
1109-
*/
1110-
if (unpin && !drm_gem_is_imported(&bo->base.base))
1071+
if (!drm_gem_is_imported(&bo->base.base))
11111072
drm_gem_shmem_unpin(&bo->base);
1112-
1113-
drm_gpuvm_put(vm);
1114-
drm_gem_object_put(&bo->base.base);
1073+
kfree(vm_bo);
11151074
}
11161075

11171076
static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
11181077
struct panthor_vm *vm)
11191078
{
1120-
struct panthor_vma *vma, *tmp_vma;
1121-
11221079
u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
11231080
op_ctx->rsvd_page_tables.ptr;
11241081

@@ -1131,16 +1088,12 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
11311088
kfree(op_ctx->rsvd_page_tables.pages);
11321089

11331090
if (op_ctx->map.vm_bo)
1134-
panthor_vm_bo_put(op_ctx->map.vm_bo);
1091+
drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
11351092

11361093
for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
11371094
kfree(op_ctx->preallocated_vmas[i]);
11381095

1139-
list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
1140-
list_del(&vma->node);
1141-
panthor_vm_bo_put(vma->base.vm_bo);
1142-
kfree(vma);
1143-
}
1096+
drm_gpuvm_bo_deferred_cleanup(&vm->base);
11441097
}
11451098

11461099
static void
@@ -1247,7 +1200,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12471200
return -EINVAL;
12481201

12491202
memset(op_ctx, 0, sizeof(*op_ctx));
1250-
INIT_LIST_HEAD(&op_ctx->returned_vmas);
12511203
op_ctx->flags = flags;
12521204
op_ctx->va.range = size;
12531205
op_ctx->va.addr = va;
@@ -1258,7 +1210,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12581210

12591211
if (!drm_gem_is_imported(&bo->base.base)) {
12601212
/* Pre-reserve the BO pages, so the map operation doesn't have to
1261-
* allocate.
1213+
* allocate. This pin is dropped in panthor_vm_bo_free(), so
1214+
* once we have successfully called drm_gpuvm_bo_create(),
1215+
* GPUVM will take care of dropping the pin for us.
12621216
*/
12631217
ret = drm_gem_shmem_pin(&bo->base);
12641218
if (ret)
@@ -1297,16 +1251,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12971251
mutex_unlock(&bo->base.base.gpuva.lock);
12981252
dma_resv_unlock(panthor_vm_resv(vm));
12991253

1300-
/* If the a vm_bo for this <VM,BO> combination exists, it already
1301-
* retains a pin ref, and we can release the one we took earlier.
1302-
*
1303-
* If our pre-allocated vm_bo is picked, it now retains the pin ref,
1304-
* which will be released in panthor_vm_bo_put().
1305-
*/
1306-
if (preallocated_vm_bo != op_ctx->map.vm_bo &&
1307-
!drm_gem_is_imported(&bo->base.base))
1308-
drm_gem_shmem_unpin(&bo->base);
1309-
13101254
op_ctx->map.bo_offset = offset;
13111255

13121256
/* L1, L2 and L3 page tables.
@@ -1354,7 +1298,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
13541298
int ret;
13551299

13561300
memset(op_ctx, 0, sizeof(*op_ctx));
1357-
INIT_LIST_HEAD(&op_ctx->returned_vmas);
13581301
op_ctx->va.range = size;
13591302
op_ctx->va.addr = va;
13601303
op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
@@ -1402,7 +1345,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx
14021345
struct panthor_vm *vm)
14031346
{
14041347
memset(op_ctx, 0, sizeof(*op_ctx));
1405-
INIT_LIST_HEAD(&op_ctx->returned_vmas);
14061348
op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
14071349
}
14081350

@@ -2052,26 +1994,13 @@ static void panthor_vma_link(struct panthor_vm *vm,
20521994

20531995
mutex_lock(&bo->base.base.gpuva.lock);
20541996
drm_gpuva_link(&vma->base, vm_bo);
2055-
drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
20561997
mutex_unlock(&bo->base.base.gpuva.lock);
20571998
}
20581999

2059-
static void panthor_vma_unlink(struct panthor_vm *vm,
2060-
struct panthor_vma *vma)
2000+
static void panthor_vma_unlink(struct panthor_vma *vma)
20612001
{
2062-
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
2063-
struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
2064-
2065-
mutex_lock(&bo->base.base.gpuva.lock);
2066-
drm_gpuva_unlink(&vma->base);
2067-
mutex_unlock(&bo->base.base.gpuva.lock);
2068-
2069-
/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
2070-
* when entering this function, so we can implement deferred VMA
2071-
* destruction. Re-assign it here.
2072-
*/
2073-
vma->base.vm_bo = vm_bo;
2074-
list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
2002+
drm_gpuva_unlink_defer(&vma->base);
2003+
kfree(vma);
20752004
}
20762005

20772006
static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
@@ -2105,12 +2034,12 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
21052034
return ret;
21062035
}
21072036

2108-
/* Ref owned by the mapping now, clear the obj field so we don't release the
2109-
* pinning/obj ref behind GPUVA's back.
2110-
*/
21112037
drm_gpuva_map(&vm->base, &vma->base, &op->map);
21122038
panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
2039+
2040+
drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
21132041
op_ctx->map.vm_bo = NULL;
2042+
21142043
return 0;
21152044
}
21162045

@@ -2149,16 +2078,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
21492078
* owned by the old mapping which will be released when this
21502079
* mapping is destroyed, we need to grab a ref here.
21512080
*/
2152-
panthor_vma_link(vm, prev_vma,
2153-
drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2081+
panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo);
21542082
}
21552083

21562084
if (next_vma) {
2157-
panthor_vma_link(vm, next_vma,
2158-
drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2085+
panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo);
21592086
}
21602087

2161-
panthor_vma_unlink(vm, unmap_vma);
2088+
panthor_vma_unlink(unmap_vma);
21622089
return 0;
21632090
}
21642091

@@ -2175,12 +2102,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
21752102
return ret;
21762103

21772104
drm_gpuva_unmap(&op->unmap);
2178-
panthor_vma_unlink(vm, unmap_vma);
2105+
panthor_vma_unlink(unmap_vma);
21792106
return 0;
21802107
}
21812108

21822109
static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
21832110
.vm_free = panthor_vm_free,
2111+
.vm_bo_free = panthor_vm_bo_free,
21842112
.sm_step_map = panthor_gpuva_sm_step_map,
21852113
.sm_step_remap = panthor_gpuva_sm_step_remap,
21862114
.sm_step_unmap = panthor_gpuva_sm_step_unmap,

0 commit comments

Comments
 (0)