Skip to content

Commit 224c9b6

Browse files
committed
uring: move io_uring code to new file
Factor out the io_uring code into a new file, to make it easy to move the nvme_get_log code as well later. While at it, initialize the uring code at global context creation, which is a significant optimization because the ring is no longer created per nvme_get_log call. Signed-off-by: Daniel Wagner <[email protected]>
1 parent a12f658 commit 224c9b6

5 files changed

Lines changed: 155 additions & 130 deletions

File tree

libnvme/src/meson.build

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,10 @@ if want_fabrics
4646
]
4747
endif
4848

49+
if liburing_dep.found()
50+
sources += 'nvme/uring.c'
51+
endif
52+
4953
if json_c_dep.found()
5054
sources += 'nvme/json.c'
5155
else

libnvme/src/nvme/ioctl.c

Lines changed: 5 additions & 130 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,6 @@
1616
#include <sys/ioctl.h>
1717
#include <sys/stat.h>
1818

19-
#ifdef CONFIG_LIBURING
20-
#include <liburing.h>
21-
#endif
22-
2319
#include <ccan/build_assert/build_assert.h>
2420
#include <ccan/endian/endian.h>
2521
#include <ccan/minmax/minmax.h>
@@ -219,95 +215,6 @@ static void nvme_init_env(void)
219215
force_4k = true;
220216
}
221217

222-
#ifdef CONFIG_LIBURING
223-
enum {
224-
IO_URING_NOT_AVAILABLE,
225-
IO_URING_AVAILABLE,
226-
} io_uring_kernel_support = IO_URING_NOT_AVAILABLE;
227-
228-
/*
229-
* gcc specific attribute, call automatically on the library loading.
230-
* if IORING_OP_URING_CMD is not supported, fallback to ioctl interface.
231-
*
232-
* The uring API expects the command of type struct nvme_passthru_cmd64.
233-
*/
234-
__attribute__((constructor))
235-
static void nvme_uring_cmd_probe()
236-
{
237-
struct io_uring_probe *probe = io_uring_get_probe();
238-
239-
if (!probe)
240-
return;
241-
242-
if (!io_uring_opcode_supported(probe, IORING_OP_URING_CMD))
243-
return;
244-
245-
io_uring_kernel_support = IO_URING_AVAILABLE;
246-
}
247-
248-
static int nvme_uring_cmd_setup(struct io_uring *ring)
249-
{
250-
if (io_uring_queue_init(NVME_URING_ENTRIES, ring,
251-
IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
252-
return -errno;
253-
return 0;
254-
}
255-
256-
static void nvme_uring_cmd_exit(struct io_uring *ring)
257-
{
258-
io_uring_queue_exit(ring);
259-
}
260-
261-
static int nvme_uring_cmd_admin_passthru_async(struct nvme_transport_handle *hdl,
262-
struct io_uring *ring, struct nvme_passthru_cmd *cmd)
263-
{
264-
struct io_uring_sqe *sqe;
265-
int ret;
266-
267-
sqe = io_uring_get_sqe(ring);
268-
if (!sqe)
269-
return -1;
270-
271-
memcpy(&sqe->cmd, cmd, sizeof(*cmd));
272-
273-
sqe->fd = hdl->fd;
274-
sqe->opcode = IORING_OP_URING_CMD;
275-
sqe->cmd_op = NVME_URING_CMD_ADMIN;
276-
277-
ret = io_uring_submit(ring);
278-
if (ret < 0)
279-
return -errno;
280-
281-
return 0;
282-
}
283-
284-
static int nvme_uring_cmd_wait_complete(struct io_uring *ring, int n)
285-
{
286-
struct io_uring_cqe *cqe;
287-
int ret, i;
288-
289-
for (i = 0; i < n; i++) {
290-
ret = io_uring_wait_cqe(ring, &cqe);
291-
if (ret < 0)
292-
return -errno;
293-
io_uring_cqe_seen(ring, cqe);
294-
}
295-
296-
return 0;
297-
}
298-
299-
static bool nvme_uring_is_usable(struct nvme_transport_handle *hdl)
300-
{
301-
struct stat st;
302-
303-
if (io_uring_kernel_support != IO_URING_AVAILABLE ||
304-
hdl->type != NVME_TRANSPORT_HANDLE_TYPE_DIRECT ||
305-
fstat(hdl->fd, &st) || !S_ISCHR(st.st_mode))
306-
return false;
307-
308-
return true;
309-
}
310-
#endif /* CONFIG_LIBURING */
311218

312219
int nvme_get_log(struct nvme_transport_handle *hdl,
313220
struct nvme_passthru_cmd *cmd, bool rae,
@@ -324,17 +231,6 @@ int nvme_get_log(struct nvme_transport_handle *hdl,
324231
__u32 cdw10 = cmd->cdw10 & (NVME_VAL(LOG_CDW10_LID) |
325232
NVME_VAL(LOG_CDW10_LSP));
326233
__u32 cdw11 = cmd->cdw11 & NVME_VAL(LOG_CDW11_LSI);
327-
#ifdef CONFIG_LIBURING
328-
bool use_uring = nvme_uring_is_usable(hdl);
329-
struct io_uring ring;
330-
int n = 0;
331-
332-
if (use_uring) {
333-
ret = nvme_uring_cmd_setup(&ring);
334-
if (ret)
335-
return ret;
336-
}
337-
#endif /* CONFIG_LIBURING */
338234

339235
if (force_4k)
340236
xfer_len = NVME_LOG_PAGE_PDU_SIZE;
@@ -373,43 +269,22 @@ int nvme_get_log(struct nvme_transport_handle *hdl,
373269
cmd->data_len = xfer;
374270
cmd->addr = (__u64)(uintptr_t)ptr;
375271

376-
#ifdef CONFIG_LIBURING
377-
if (use_uring) {
378-
if (n >= NVME_URING_ENTRIES) {
379-
ret = nvme_uring_cmd_wait_complete(&ring, n);
380-
if (ret)
381-
goto uring_exit;
382-
n = 0;
383-
}
384-
n += 1;
385-
ret = nvme_uring_cmd_admin_passthru_async(hdl,
386-
&ring, cmd);
387-
if (ret)
388-
goto uring_exit;
389-
} else {
272+
if (hdl->ctx->uring_enabled)
273+
ret = nvme_uring_cmd_admin_passthru_async(hdl, cmd);
274+
else
390275
ret = nvme_submit_admin_passthru(hdl, cmd);
391-
if (ret)
392-
return ret;
393-
}
394-
#else /* CONFIG_LIBURING */
395-
ret = nvme_submit_admin_passthru(hdl, cmd);
396-
#endif /* CONFIG_LIBURING */
397276
if (ret)
398277
return ret;
399278

400279
offset += xfer;
401280
ptr += xfer;
402281
} while (offset < data_len);
403282

404-
#ifdef CONFIG_LIBURING
405-
if (use_uring) {
406-
ret = nvme_uring_cmd_wait_complete(&ring, n);
407-
uring_exit:
408-
nvme_uring_cmd_exit(&ring);
283+
if (hdl->ctx->uring_enabled) {
284+
ret = nvme_uring_cmd_wait_complete(hdl);
409285
if (ret)
410286
return ret;
411287
}
412-
#endif /* CONFIG_LIBURING */
413288

414289
return 0;
415290
}

libnvme/src/nvme/lib.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,9 @@ struct nvme_global_ctx *nvme_create_global_ctx(FILE *fp, int log_level)
5959
ctx->ioctl_probing = true;
6060
ctx->mi_probe_enabled = nvme_mi_probe_enabled_default();
6161

62+
if (!nvme_open_uring(ctx))
63+
ctx->uring_enabled = true;
64+
6265
return ctx;
6366
}
6467

@@ -80,6 +83,7 @@ void nvme_free_global_ctx(struct nvme_global_ctx *ctx)
8083
nvme_mi_close(ep);
8184
free(ctx->config_file);
8285
free(ctx->application);
86+
nvme_close_uring(ctx);
8387
free(ctx);
8488
}
8589

libnvme/src/nvme/private.h

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
*/
88
#pragma once
99

10+
#include <errno.h>
1011
#include <ifaddrs.h>
1112
#include <poll.h>
1213

@@ -286,10 +287,16 @@ struct nvme_global_ctx {
286287
struct nvme_log log;
287288
bool mi_probe_enabled;
288289
bool ioctl_probing;
290+
bool uring_enabled;
289291
bool create_only;
290292
bool dry_run;
291293
struct nvme_fabric_options *options;
292294
struct ifaddrs *ifaddrs_cache; /* init with nvme_getifaddrs() */
295+
296+
#ifdef CONFIG_LIBURING
297+
int ring_cmds;
298+
struct io_uring *ring;
299+
#endif
293300
};
294301

295302
struct nvmf_discovery_ctx {
@@ -780,3 +787,31 @@ void nvme_ns_release_transport_handle(nvme_ns_t n);
780787
*/
781788
int nvme_mi_admin_admin_passthru(struct nvme_transport_handle *hdl,
782789
struct nvme_passthru_cmd *cmd);
790+
791+
#ifdef CONFIG_LIBURING
int nvme_open_uring(struct nvme_global_ctx *ctx);
void nvme_close_uring(struct nvme_global_ctx *ctx);
int nvme_uring_cmd_admin_passthru_async(struct nvme_transport_handle *hdl,
		struct nvme_passthru_cmd *cmd);
int nvme_uring_cmd_wait_complete(struct nvme_transport_handle *hdl);
#else
/* Stubs used when libnvme is built without liburing support. */
static inline int nvme_open_uring(struct nvme_global_ctx *ctx)
{
	return -ENOTSUP;
}
static inline void nvme_close_uring(struct nvme_global_ctx *ctx)
{
}
static inline int nvme_uring_cmd_admin_passthru_async(
		struct nvme_transport_handle *hdl,
		struct nvme_passthru_cmd *cmd)
{
	return -ENOTSUP;
}
static inline int nvme_uring_cmd_wait_complete(
		struct nvme_transport_handle *hdl)
{
	return -ENOTSUP;
}
#endif
817+

libnvme/src/nvme/uring.c

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
// SPDX-License-Identifier: LGPL-2.1-or-later
2+
/*
3+
* This file is part of libnvme.
4+
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
5+
*
6+
* Authors: Keith Busch <[email protected]>
7+
* Chaitanya Kulkarni <[email protected]>
8+
* Daniel Wagner <[email protected]>
9+
*/
10+
#include <liburing.h>
11+
12+
#include <libnvme.h>
13+
14+
#include "private.h"
15+
16+
int nvme_open_uring(struct nvme_global_ctx *ctx)
17+
{
18+
struct io_uring_probe *probe;
19+
struct io_uring *ring;
20+
21+
probe = io_uring_get_probe();
22+
if (!probe)
23+
return -ENOTSUP;
24+
25+
if (!io_uring_opcode_supported(probe, IORING_OP_URING_CMD))
26+
return -ENOTSUP;
27+
28+
ring = calloc(1, sizeof(*ring));
29+
if (!ring)
30+
return -ENOMEM;
31+
32+
if (io_uring_queue_init(NVME_URING_ENTRIES, ring,
33+
IORING_SETUP_SQE128 | IORING_SETUP_CQE32)) {
34+
free(ring);
35+
return -errno;
36+
}
37+
38+
ctx->ring = ring;
39+
return 0;
40+
}
41+
42+
void nvme_close_uring(struct nvme_global_ctx *ctx)
43+
{
44+
if (!ctx->ring)
45+
return;
46+
47+
io_uring_queue_exit(ctx->ring);
48+
free(ctx->ring);
49+
}
50+
51+
static int __nvme_uring_cmd_admin_passthru_async(struct io_uring *ring, int fd,
52+
struct nvme_passthru_cmd *cmd)
53+
{
54+
struct io_uring_sqe *sqe;
55+
int ret;
56+
57+
sqe = io_uring_get_sqe(ring);
58+
if (!sqe)
59+
return -1;
60+
61+
memcpy(&sqe->cmd, cmd, sizeof(*cmd));
62+
63+
sqe->fd = fd;
64+
sqe->opcode = IORING_OP_URING_CMD;
65+
sqe->cmd_op = NVME_URING_CMD_ADMIN;
66+
67+
ret = io_uring_submit(ring);
68+
if (ret < 0)
69+
return -errno;
70+
71+
return 0;
72+
}
73+
74+
int nvme_uring_cmd_wait_complete(struct nvme_transport_handle *hdl)
75+
{
76+
struct io_uring_cqe *cqe;
77+
struct io_uring *ring;
78+
int err;
79+
80+
ring = hdl->ctx->ring;
81+
82+
for (int i = 0; i < hdl->ctx->ring_cmds; i++) {
83+
err = io_uring_wait_cqe(ring, &cqe);
84+
if (err < 0)
85+
return -errno;
86+
io_uring_cqe_seen(ring, cqe);
87+
}
88+
hdl->ctx->ring_cmds = 0;
89+
90+
return 0;
91+
}
92+
93+
int nvme_uring_cmd_admin_passthru_async(struct nvme_transport_handle *hdl,
94+
struct nvme_passthru_cmd *cmd)
95+
{
96+
int err;
97+
98+
if (hdl->ctx->ring_cmds >= NVME_URING_ENTRIES) {
99+
err = nvme_uring_cmd_wait_complete(hdl);
100+
if (err)
101+
return err;
102+
}
103+
104+
hdl->ctx->ring_cmds += 1;
105+
return __nvme_uring_cmd_admin_passthru_async(hdl->ctx->ring,
106+
hdl->fd, cmd);
107+
}

0 commit comments

Comments
 (0)