Skip to content

Commit 9f6481c

Browse files
Ming Lei authored and kawasaki committed
ublk: move auto buffer register handling into one dedicated helper
Move checking & clearing of UBLK_IO_FLAG_AUTO_BUF_REG into ublk_handle_auto_buf_reg(), and return the buffer index from this helper. Also move ublk_set_auto_buf_reg() into this single helper too.

Add ublk_config_io_buf() for setting up the ublk io buffer, covering both ublk buffer copy and auto buffer register.

Signed-off-by: Ming Lei <[email protected]>
Reviewed-by: Caleb Sander Mateos <[email protected]>
1 parent 0d50b24 commit 9f6481c

1 file changed

Lines changed: 75 additions & 56 deletions

File tree

drivers/block/ublk_drv.c

Lines changed: 75 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,8 @@
4848

4949
#define UBLK_MINORS (1U << MINORBITS)
5050

51+
#define UBLK_INVALID_BUF_IDX ((u16)-1)
52+
5153
/* private ioctl command mirror */
5254
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
5355
#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
@@ -2003,16 +2005,52 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
20032005
return 0;
20042006
}
20052007

2008+
static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
2009+
{
2010+
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
2011+
2012+
pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
2013+
2014+
if (pdu->buf.reserved0 || pdu->buf.reserved1)
2015+
return -EINVAL;
2016+
2017+
if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
2018+
return -EINVAL;
2019+
return 0;
2020+
}
2021+
2022+
static int ublk_handle_auto_buf_reg(struct ublk_io *io,
2023+
struct io_uring_cmd *cmd,
2024+
u16 *buf_idx)
2025+
{
2026+
if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
2027+
io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
2028+
2029+
/*
2030+
* `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
2031+
* and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same
2032+
* `io_ring_ctx`.
2033+
*
2034+
* If this uring_cmd's io_ring_ctx isn't same with the
2035+
* one for registering the buffer, it is ublk server's
2036+
* responsibility for unregistering the buffer, otherwise
2037+
* this ublk request gets stuck.
2038+
*/
2039+
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
2040+
*buf_idx = io->buf_index;
2041+
}
2042+
2043+
return ublk_set_auto_buf_reg(cmd);
2044+
}
2045+
20062046
/* Once we return, `io->req` can't be used any more */
20072047
static inline struct request *
2008-
ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd,
2009-
unsigned long buf_addr, int result)
2048+
ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd, int result)
20102049
{
20112050
struct request *req = io->req;
20122051

20132052
io->cmd = cmd;
20142053
io->flags |= UBLK_IO_FLAG_ACTIVE;
2015-
io->addr = buf_addr;
20162054
io->res = result;
20172055

20182056
/* now this cmd slot is owned by ublk driver */
@@ -2021,6 +2059,22 @@ ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd,
20212059
return req;
20222060
}
20232061

2062+
static inline int
2063+
ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io,
2064+
struct io_uring_cmd *cmd, unsigned long buf_addr,
2065+
u16 *buf_idx)
2066+
{
2067+
if (ublk_support_auto_buf_reg(ubq)) {
2068+
int ret = ublk_handle_auto_buf_reg(io, cmd, buf_idx);
2069+
2070+
if (ret)
2071+
return ret;
2072+
} else {
2073+
io->addr = buf_addr;
2074+
}
2075+
return 0;
2076+
}
2077+
20242078
static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
20252079
unsigned int issue_flags,
20262080
struct ublk_queue *ubq, unsigned int tag)
@@ -2036,20 +2090,6 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
20362090
io_uring_cmd_mark_cancelable(cmd, issue_flags);
20372091
}
20382092

2039-
static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
2040-
{
2041-
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
2042-
2043-
pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
2044-
2045-
if (pdu->buf.reserved0 || pdu->buf.reserved1)
2046-
return -EINVAL;
2047-
2048-
if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
2049-
return -EINVAL;
2050-
return 0;
2051-
}
2052-
20532093
static void ublk_io_release(void *priv)
20542094
{
20552095
struct request *rq = priv;
@@ -2170,13 +2210,11 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
21702210
goto out;
21712211
}
21722212

2173-
if (ublk_support_auto_buf_reg(ubq)) {
2174-
ret = ublk_set_auto_buf_reg(cmd);
2175-
if (ret)
2176-
goto out;
2177-
}
2213+
ublk_fill_io_cmd(io, cmd, 0);
2214+
ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL);
2215+
if (ret)
2216+
goto out;
21782217

2179-
ublk_fill_io_cmd(io, cmd, buf_addr, 0);
21802218
WRITE_ONCE(io->task, get_task_struct(current));
21812219
ublk_mark_io_ready(ub, ubq);
21822220
out:
@@ -2208,35 +2246,13 @@ static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
22082246
return 0;
22092247
}
22102248

2211-
static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
2212-
struct ublk_io *io, struct io_uring_cmd *cmd,
2213-
struct request *req, unsigned int issue_flags,
2214-
__u64 zone_append_lba)
2249+
static void ublk_commit_and_fetch(const struct ublk_queue *ubq,
2250+
struct ublk_io *io, struct io_uring_cmd *cmd,
2251+
struct request *req, unsigned int issue_flags,
2252+
__u64 zone_append_lba, u16 buf_idx)
22152253
{
2216-
if (ublk_support_auto_buf_reg(ubq)) {
2217-
int ret;
2218-
2219-
/*
2220-
* `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
2221-
* and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same
2222-
* `io_ring_ctx`.
2223-
*
2224-
* If this uring_cmd's io_ring_ctx isn't same with the
2225-
* one for registering the buffer, it is ublk server's
2226-
* responsibility for unregistering the buffer, otherwise
2227-
* this ublk request gets stuck.
2228-
*/
2229-
if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
2230-
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
2231-
io_buffer_unregister_bvec(cmd, io->buf_index,
2232-
issue_flags);
2233-
io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
2234-
}
2235-
2236-
ret = ublk_set_auto_buf_reg(cmd);
2237-
if (ret)
2238-
return ret;
2239-
}
2254+
if (buf_idx != UBLK_INVALID_BUF_IDX)
2255+
io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
22402256

22412257
if (req_op(req) == REQ_OP_ZONE_APPEND)
22422258
req->__sector = zone_append_lba;
@@ -2245,7 +2261,6 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
22452261
ublk_sub_req_ref(io, req);
22462262
else
22472263
__ublk_complete_rq(req);
2248-
return 0;
22492264
}
22502265

22512266
static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
@@ -2270,6 +2285,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
22702285
unsigned int issue_flags,
22712286
const struct ublksrv_io_cmd *ub_cmd)
22722287
{
2288+
u16 buf_idx = UBLK_INVALID_BUF_IDX;
22732289
struct ublk_device *ub = cmd->file->private_data;
22742290
struct ublk_queue *ubq;
22752291
struct ublk_io *io;
@@ -2348,9 +2364,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
23482364
ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr);
23492365
if (ret)
23502366
goto out;
2351-
req = ublk_fill_io_cmd(io, cmd, ub_cmd->addr, ub_cmd->result);
2352-
ret = ublk_commit_and_fetch(ubq, io, cmd, req, issue_flags,
2353-
ub_cmd->zone_append_lba);
2367+
req = ublk_fill_io_cmd(io, cmd, ub_cmd->result);
2368+
ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx);
2369+
ublk_commit_and_fetch(ubq, io, cmd, req, issue_flags,
2370+
ub_cmd->zone_append_lba, buf_idx);
23542371
if (ret)
23552372
goto out;
23562373
break;
@@ -2360,7 +2377,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
23602377
* uring_cmd active first and prepare for handling new requeued
23612378
* request
23622379
*/
2363-
req = ublk_fill_io_cmd(io, cmd, ub_cmd->addr, 0);
2380+
req = ublk_fill_io_cmd(io, cmd, 0);
2381+
ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL);
2382+
WARN_ON_ONCE(ret);
23642383
if (likely(ublk_get_data(ubq, io, req))) {
23652384
__ublk_prep_compl_io_cmd(io, req);
23662385
return UBLK_IO_RES_OK;

0 commit comments

Comments
 (0)