16 16 #include <sys/ioctl.h>
17 17 #include <sys/stat.h>
18 18
19- #ifdef CONFIG_LIBURING
20- #include <liburing.h>
21- #endif
22-
23 19 #include <ccan/build_assert/build_assert.h>
24 20 #include <ccan/endian/endian.h>
25 21 #include <ccan/minmax/minmax.h>
@@ -219,95 +215,6 @@ static void nvme_init_env(void)
219 215 	force_4k = true;
220 216 }
221 217
222- #ifdef CONFIG_LIBURING
223- enum {
224- 	IO_URING_NOT_AVAILABLE,
225- 	IO_URING_AVAILABLE,
226- } io_uring_kernel_support = IO_URING_NOT_AVAILABLE;
227-
228- /*
229- * gcc specific attribute, call automatically on the library loading.
230- * if IORING_OP_URING_CMD is not supported, fallback to ioctl interface.
231- *
232- * The uring API expects the command of type struct nvme_passthru_cmd64.
233- */
234- __attribute__((constructor))
235- static void nvme_uring_cmd_probe()
236- {
237- 	struct io_uring_probe *probe = io_uring_get_probe();
238- 
239- 	if (!probe)
240- 		return;
241- 
242- 	if (!io_uring_opcode_supported(probe, IORING_OP_URING_CMD))
243- 		return;
244- 
245- 	io_uring_kernel_support = IO_URING_AVAILABLE;
246- }
247-
248- static int nvme_uring_cmd_setup(struct io_uring *ring)
249- {
250- 	if (io_uring_queue_init(NVME_URING_ENTRIES, ring,
251- 				IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
252- 		return -errno;
253- 	return 0;
254- }
255-
256- static void nvme_uring_cmd_exit(struct io_uring *ring)
257- {
258- 	io_uring_queue_exit(ring);
259- }
260-
261- static int nvme_uring_cmd_admin_passthru_async(struct nvme_transport_handle *hdl,
262- 		struct io_uring *ring, struct nvme_passthru_cmd *cmd)
263- {
264- 	struct io_uring_sqe *sqe;
265- 	int ret;
266- 
267- 	sqe = io_uring_get_sqe(ring);
268- 	if (!sqe)
269- 		return -1;
270- 
271- 	memcpy(&sqe->cmd, cmd, sizeof(*cmd));
272- 
273- 	sqe->fd = hdl->fd;
274- 	sqe->opcode = IORING_OP_URING_CMD;
275- 	sqe->cmd_op = NVME_URING_CMD_ADMIN;
276- 
277- 	ret = io_uring_submit(ring);
278- 	if (ret < 0)
279- 		return -errno;
280- 
281- 	return 0;
282- }
283-
284- static int nvme_uring_cmd_wait_complete(struct io_uring *ring, int n)
285- {
286- 	struct io_uring_cqe *cqe;
287- 	int ret, i;
288- 
289- 	for (i = 0; i < n; i++) {
290- 		ret = io_uring_wait_cqe(ring, &cqe);
291- 		if (ret < 0)
292- 			return -errno;
293- 		io_uring_cqe_seen(ring, cqe);
294- 	}
295- 
296- 	return 0;
297- }
298-
299- static bool nvme_uring_is_usable(struct nvme_transport_handle *hdl)
300- {
301- 	struct stat st;
302- 
303- 	if (io_uring_kernel_support != IO_URING_AVAILABLE ||
304- 	    hdl->type != NVME_TRANSPORT_HANDLE_TYPE_DIRECT ||
305- 	    fstat(hdl->fd, &st) || !S_ISCHR(st.st_mode))
306- 		return false;
307- 
308- 	return true;
309- }
310- #endif /* CONFIG_LIBURING */
311 218 
312 219 int nvme_get_log(struct nvme_transport_handle *hdl,
313 220 		 struct nvme_passthru_cmd *cmd, bool rae,
@@ -324,17 +231,6 @@ int nvme_get_log(struct nvme_transport_handle *hdl,
324 231 	__u32 cdw10 = cmd->cdw10 & (NVME_VAL(LOG_CDW10_LID) |
325 232 				    NVME_VAL(LOG_CDW10_LSP));
326 233 	__u32 cdw11 = cmd->cdw11 & NVME_VAL(LOG_CDW11_LSI);
327- #ifdef CONFIG_LIBURING
328- 	bool use_uring = nvme_uring_is_usable(hdl);
329- 	struct io_uring ring;
330- 	int n = 0;
331- 
332- 	if (use_uring) {
333- 		ret = nvme_uring_cmd_setup(&ring);
334- 		if (ret)
335- 			return ret;
336- 	}
337- #endif /* CONFIG_LIBURING */
338 234 
339 235 	if (force_4k)
340 236 		xfer_len = NVME_LOG_PAGE_PDU_SIZE;
@@ -373,43 +269,22 @@ int nvme_get_log(struct nvme_transport_handle *hdl,
373 269 		cmd->data_len = xfer;
374 270 		cmd->addr = (__u64)(uintptr_t)ptr;
375 271 
376- #ifdef CONFIG_LIBURING
377- 		if (use_uring) {
378- 			if (n >= NVME_URING_ENTRIES) {
379- 				ret = nvme_uring_cmd_wait_complete(&ring, n);
380- 				if (ret)
381- 					goto uring_exit;
382- 				n = 0;
383- 			}
384- 			n += 1;
385- 			ret = nvme_uring_cmd_admin_passthru_async(hdl,
386- 					&ring, cmd);
387- 			if (ret)
388- 				goto uring_exit;
389- 		} else {
272+ 		if (hdl->uring_enabled)
273+ 			ret = nvme_submit_admin_passthru_async(hdl, cmd);
274+ 		else
390 275 			ret = nvme_submit_admin_passthru(hdl, cmd);
391- 			if (ret)
392- 				return ret;
393- 		}
394- #else /* CONFIG_LIBURING */
395- 		ret = nvme_submit_admin_passthru(hdl, cmd);
396- #endif /* CONFIG_LIBURING */
397 276 		if (ret)
398 277 			return ret;
399 278 
400 279 		offset += xfer;
401 280 		ptr += xfer;
402 281 	} while (offset < data_len);
403 282 
404- #ifdef CONFIG_LIBURING
405- 	if (use_uring) {
406- 		ret = nvme_uring_cmd_wait_complete(&ring, n);
407- uring_exit:
408- 		nvme_uring_cmd_exit(&ring);
283+ 	if (hdl->uring_enabled) {
284+ 		ret = nvme_wait_complete_passthru(hdl);
409 285 		if (ret)
410 286 			return ret;
411 287 	}
412- #endif /* CONFIG_LIBURING */
413 288 
414 289 	return 0;
415 290 }
0 commit comments