Skip to content

Commit a01745c

Browse files
Konstantin Taranov authored and rleon committed
RDMA/mana_ib: Add device‑memory support
Introduce a basic DM implementation that enables creating and registering device memory, and using the associated memory keys for networking operations. Signed-off-by: Konstantin Taranov <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 9b9d253 commit a01745c

4 files changed

Lines changed: 193 additions & 3 deletions

File tree

drivers/infiniband/hw/mana/device.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,12 @@ static const struct ib_device_ops mana_ib_device_stats_ops = {
6969
.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
7070
};
7171

72+
const struct ib_device_ops mana_ib_dev_dm_ops = {
73+
.alloc_dm = mana_ib_alloc_dm,
74+
.dealloc_dm = mana_ib_dealloc_dm,
75+
.reg_dm_mr = mana_ib_reg_dm_mr,
76+
};
77+
7278
static int mana_ib_netdev_event(struct notifier_block *this,
7379
unsigned long event, void *ptr)
7480
{
@@ -139,6 +145,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
139145
ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
140146
if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
141147
ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
148+
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_dm_ops);
142149

143150
ret = mana_ib_create_eqs(dev);
144151
if (ret) {

drivers/infiniband/hw/mana/mana_ib.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -131,6 +131,11 @@ struct mana_ib_mr {
131131
mana_handle_t mr_handle;
132132
};
133133

134+
struct mana_ib_dm {
135+
struct ib_dm ibdm;
136+
mana_handle_t dm_handle;
137+
};
138+
134139
struct mana_ib_cq {
135140
struct ib_cq ibcq;
136141
struct mana_ib_queue queue;
@@ -735,4 +740,11 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
735740
u64 iova, int fd, int mr_access_flags,
736741
struct ib_dmah *dmah,
737742
struct uverbs_attr_bundle *attrs);
743+
744+
struct ib_dm *mana_ib_alloc_dm(struct ib_device *dev, struct ib_ucontext *context,
745+
struct ib_dm_alloc_attr *attr, struct uverbs_attr_bundle *attrs);
746+
int mana_ib_dealloc_dm(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
747+
struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, struct ib_dm_mr_attr *attr,
748+
struct uverbs_attr_bundle *attrs);
749+
738750
#endif

drivers/infiniband/hw/mana/mr.c

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
4040

4141
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
4242
sizeof(resp));
43+
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
4344
req.pd_handle = mr_params->pd_handle;
4445
req.mr_type = mr_params->mr_type;
4546

@@ -55,6 +56,12 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
5556
req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
5657
req.zbva.access_flags = mr_params->zbva.access_flags;
5758
break;
59+
case GDMA_MR_TYPE_DM:
60+
req.da_ext.length = mr_params->da.length;
61+
req.da.dm_handle = mr_params->da.dm_handle;
62+
req.da.offset = mr_params->da.offset;
63+
req.da.access_flags = mr_params->da.access_flags;
64+
break;
5865
default:
5966
ibdev_dbg(&dev->ib_dev,
6067
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -317,3 +324,126 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
317324

318325
return 0;
319326
}
327+
328+
static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
329+
struct ib_dm_alloc_attr *attr)
330+
{
331+
struct gdma_context *gc = mdev_to_gc(mdev);
332+
struct gdma_alloc_dm_resp resp = {};
333+
struct gdma_alloc_dm_req req = {};
334+
int err;
335+
336+
mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOC_DM, sizeof(req), sizeof(resp));
337+
req.length = attr->length;
338+
req.alignment = attr->alignment;
339+
req.flags = attr->flags;
340+
341+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
342+
if (err || resp.hdr.status) {
343+
if (!err)
344+
err = -EPROTO;
345+
346+
return err;
347+
}
348+
349+
dm->dm_handle = resp.dm_handle;
350+
351+
return 0;
352+
}
353+
354+
struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
355+
struct ib_ucontext *context,
356+
struct ib_dm_alloc_attr *attr,
357+
struct uverbs_attr_bundle *attrs)
358+
{
359+
struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
360+
struct mana_ib_dm *dm;
361+
int err;
362+
363+
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
364+
if (!dm)
365+
return ERR_PTR(-ENOMEM);
366+
367+
err = mana_ib_gd_alloc_dm(dev, dm, attr);
368+
if (err)
369+
goto err_free;
370+
371+
return &dm->ibdm;
372+
373+
err_free:
374+
kfree(dm);
375+
return ERR_PTR(err);
376+
}
377+
378+
static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
379+
{
380+
struct gdma_context *gc = mdev_to_gc(mdev);
381+
struct gdma_destroy_dm_resp resp = {};
382+
struct gdma_destroy_dm_req req = {};
383+
int err;
384+
385+
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
386+
req.dm_handle = dm->dm_handle;
387+
388+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
389+
if (err || resp.hdr.status) {
390+
if (!err)
391+
err = -EPROTO;
392+
393+
return err;
394+
}
395+
396+
return 0;
397+
}
398+
399+
int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
400+
{
401+
struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
402+
struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
403+
int err;
404+
405+
err = mana_ib_gd_destroy_dm(dev, dm);
406+
if (err)
407+
return err;
408+
409+
kfree(dm);
410+
return 0;
411+
}
412+
413+
struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
414+
struct ib_dm_mr_attr *attr,
415+
struct uverbs_attr_bundle *attrs)
416+
{
417+
struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
418+
struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
419+
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
420+
struct gdma_create_mr_params mr_params = {};
421+
struct mana_ib_mr *mr;
422+
int err;
423+
424+
attr->access_flags &= ~IB_ACCESS_OPTIONAL;
425+
if (attr->access_flags & ~VALID_MR_FLAGS)
426+
return ERR_PTR(-EOPNOTSUPP);
427+
428+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
429+
if (!mr)
430+
return ERR_PTR(-ENOMEM);
431+
432+
mr_params.pd_handle = pd->pd_handle;
433+
mr_params.mr_type = GDMA_MR_TYPE_DM;
434+
mr_params.da.dm_handle = mana_dm->dm_handle;
435+
mr_params.da.offset = attr->offset;
436+
mr_params.da.length = attr->length;
437+
mr_params.da.access_flags =
438+
mana_ib_verbs_to_gdma_access_flags(attr->access_flags);
439+
440+
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
441+
if (err)
442+
goto err_free;
443+
444+
return &mr->ibmr;
445+
446+
err_free:
447+
kfree(mr);
448+
return ERR_PTR(err);
449+
}

include/net/mana/gdma.h

Lines changed: 44 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ enum gdma_request_type {
3535
GDMA_CREATE_MR = 31,
3636
GDMA_DESTROY_MR = 32,
3737
GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
38+
GDMA_ALLOC_DM = 96, /* 0x60 */
39+
GDMA_DESTROY_DM = 97, /* 0x61 */
3840
};
3941

4042
#define GDMA_RESOURCE_DOORBELL_PAGE 27
@@ -861,6 +863,8 @@ enum gdma_mr_type {
861863
GDMA_MR_TYPE_GVA = 2,
862864
/* Guest zero-based address MRs */
863865
GDMA_MR_TYPE_ZBVA = 4,
866+
/* Device address MRs */
867+
GDMA_MR_TYPE_DM = 5,
864868
};
865869

866870
struct gdma_create_mr_params {
@@ -876,6 +880,12 @@ struct gdma_create_mr_params {
876880
u64 dma_region_handle;
877881
enum gdma_mr_access_flags access_flags;
878882
} zbva;
883+
struct {
884+
u64 dm_handle;
885+
u64 offset;
886+
u64 length;
887+
enum gdma_mr_access_flags access_flags;
888+
} da;
879889
};
880890
};
881891

@@ -890,13 +900,23 @@ struct gdma_create_mr_request {
890900
u64 dma_region_handle;
891901
u64 virtual_address;
892902
enum gdma_mr_access_flags access_flags;
893-
} gva;
903+
} __packed gva;
894904
struct {
895905
u64 dma_region_handle;
896906
enum gdma_mr_access_flags access_flags;
897-
} zbva;
898-
};
907+
} __packed zbva;
908+
struct {
909+
u64 dm_handle;
910+
u64 offset;
911+
enum gdma_mr_access_flags access_flags;
912+
} __packed da;
913+
} __packed;
899914
u32 reserved_2;
915+
union {
916+
struct {
917+
u64 length;
918+
} da_ext;
919+
};
900920
};/* HW DATA */
901921

902922
struct gdma_create_mr_response {
@@ -915,6 +935,27 @@ struct gdma_destroy_mr_response {
915935
struct gdma_resp_hdr hdr;
916936
};/* HW DATA */
917937

938+
struct gdma_alloc_dm_req {
939+
struct gdma_req_hdr hdr;
940+
u64 length;
941+
u32 alignment;
942+
u32 flags;
943+
}; /* HW Data */
944+
945+
struct gdma_alloc_dm_resp {
946+
struct gdma_resp_hdr hdr;
947+
u64 dm_handle;
948+
}; /* HW Data */
949+
950+
struct gdma_destroy_dm_req {
951+
struct gdma_req_hdr hdr;
952+
u64 dm_handle;
953+
}; /* HW Data */
954+
955+
struct gdma_destroy_dm_resp {
956+
struct gdma_resp_hdr hdr;
957+
}; /* HW Data */
958+
918959
int mana_gd_verify_vf_version(struct pci_dev *pdev);
919960

920961
int mana_gd_register_device(struct gdma_dev *gd);

0 commit comments

Comments
 (0)