Skip to content

Commit 7b6d325

Browse files
Yang Xiuwei authored and axboe committed
scsi: bsg: add io_uring passthrough handler
Implement the SCSI-specific io_uring command handler for BSG using
struct bsg_uring_cmd. The handler builds a SCSI request from the
io_uring command, maps user buffers (including fixed buffers), and
completes asynchronously via a request end_io callback and task_work.

Completion returns a 32-bit status and packed residual/sense
information via CQE res and res2, and supports IO_URING_F_NONBLOCK.

Signed-off-by: Yang Xiuwei <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent a1e97ce commit 7b6d325

1 file changed

Lines changed: 167 additions & 1 deletion

File tree

drivers/scsi/scsi_bsg.c

Lines changed: 167 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,10 +10,176 @@
1010

1111
#define uptr64(val) ((void __user *)(uintptr_t)(val))
1212

13+
/*
 * Per-command BSG SCSI PDU stored in io_uring_cmd.pdu[32].
 * Holds temporary state between submission, completion and task_work.
 */
struct scsi_bsg_uring_cmd_pdu {
	struct bio *bio;	/* mapped user buffer, unmap in task work */
	struct request *req;	/* block request, freed in task work */
	u64 response_addr;	/* user space response buffer address */
};
/* The PDU must fit in the fixed-size scratch area of struct io_uring_cmd. */
static_assert(sizeof(struct scsi_bsg_uring_cmd_pdu) <= sizeof_field(struct io_uring_cmd, pdu));
23+
24+
/* Return the per-command PDU embedded in @ioucmd's pdu scratch area. */
static inline struct scsi_bsg_uring_cmd_pdu *scsi_bsg_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return io_uring_cmd_to_pdu(ioucmd, struct scsi_bsg_uring_cmd_pdu);
}
29+
30+
/*
 * Task work: build res2 (layout in uapi/linux/bsg.h) and copy sense to user.
 *
 * Runs in task context so copy_to_user() is safe.  Unmaps the user buffer,
 * copies sense data (CHECK CONDITION only), frees the request and posts the
 * CQE.  res carries 0/-EFAULT, res2 the packed status/sense/residual info.
 */
static void scsi_bsg_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);
	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
	struct request *rq = pdu->req;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	u64 res2;
	int ret = 0;
	u8 driver_status = 0;
	u8 sense_len_wr = 0;

	/* Unmap (and copy back, for reads) the user data buffer first. */
	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);

	/* Sense data is only reported for CHECK CONDITION status. */
	if (scsi_status_is_check_condition(scmd->result)) {
		driver_status = DRIVER_SENSE;
		/* Only copy sense if the submitter supplied a buffer. */
		if (pdu->response_addr)
			sense_len_wr = min_t(u8, scmd->sense_len,
					     SCSI_SENSE_BUFFERSIZE);
	}

	if (sense_len_wr) {
		if (copy_to_user(uptr64(pdu->response_addr), scmd->sense_buffer,
				 sense_len_wr))
			ret = -EFAULT;
	}

	/* Pack status/driver_status/host/sense_len/resid per uapi layout. */
	res2 = bsg_scsi_res2_build(status_byte(scmd->result), driver_status,
				   host_byte(scmd->result), sense_len_wr,
				   scmd->resid_len);

	/*
	 * Free the request before completing: scmd (and its sense buffer)
	 * must not be touched after this point.
	 */
	blk_mq_free_request(rq);
	io_uring_cmd_done32(ioucmd, ret, res2,
			    IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
}
66+
67+
static enum rq_end_io_ret scsi_bsg_uring_cmd_done(struct request *req,
68+
blk_status_t status,
69+
const struct io_comp_batch *iocb)
70+
{
71+
struct io_uring_cmd *ioucmd = req->end_io_data;
72+
73+
io_uring_cmd_do_in_task_lazy(ioucmd, scsi_bsg_uring_task_cb);
74+
return RQ_END_IO_NONE;
75+
}
76+
77+
/*
 * Attach the command's single user data buffer (dout or din, never both —
 * the caller rejects bidirectional transfers) to @req.  Honours registered
 * (fixed) buffers when IORING_URING_CMD_FIXED is set.
 */
static int scsi_bsg_map_user_buffer(struct request *req,
				    struct io_uring_cmd *ioucmd,
				    unsigned int issue_flags, gfp_t gfp_mask)
{
	const struct bsg_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe, struct bsg_uring_cmd);
	bool out = cmd->dout_xfer_len > 0;
	u64 addr = out ? cmd->dout_xferp : cmd->din_xferp;
	unsigned long len = out ? cmd->dout_xfer_len : cmd->din_xfer_len;
	struct iov_iter iter;
	int ret;

	/* Plain user pointer: map it directly. */
	if (!(ioucmd->flags & IORING_URING_CMD_FIXED))
		return blk_rq_map_user(req->q, req, NULL, uptr64(addr), len,
				       gfp_mask);

	/* Fixed buffer: resolve the registered buffer into an iov_iter. */
	ret = io_uring_cmd_import_fixed(addr, len, out ? WRITE : READ,
					&iter, ioucmd, issue_flags);
	if (ret < 0)
		return ret;
	return blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
}
102+
13103
/*
 * io_uring passthrough submission for BSG SCSI commands.
 *
 * Validates the bsg_uring_cmd SQE payload, builds a SCSI request, maps the
 * user data buffer (if any) and fires the request off asynchronously.
 * Returns -EIOCBQUEUED on successful submission; completion is posted from
 * scsi_bsg_uring_task_cb via the request's end_io callback.
 */
static int scsi_bsg_uring_cmd(struct request_queue *q, struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags, bool open_for_write)
{
	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
	const struct bsg_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe, struct bsg_uring_cmd);
	struct scsi_cmnd *scmd;
	struct request *req;
	blk_mq_req_flags_t blk_flags = 0;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	if (cmd->protocol != BSG_PROTOCOL_SCSI ||
	    cmd->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;

	/* A CDB is mandatory. */
	if (!cmd->request || cmd->request_len == 0)
		return -EINVAL;

	if (cmd->dout_xfer_len && cmd->din_xfer_len) {
		pr_warn_once("BIDI support in bsg has been removed.\n");
		return -EOPNOTSUPP;
	}

	/* iovec-based transfers are not supported through this path. */
	if (cmd->dout_iovec_count > 0 || cmd->din_iovec_count > 0)
		return -EOPNOTSUPP;

	/* Honor nonblocking submission: no sleeping in alloc or mapping. */
	if (issue_flags & IO_URING_F_NONBLOCK) {
		blk_flags = BLK_MQ_REQ_NOWAIT;
		gfp_mask = GFP_NOWAIT;
	}

	req = scsi_alloc_request(q, cmd->dout_xfer_len ?
				 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = cmd->request_len;
	/* Reject CDBs longer than the embedded command buffer. */
	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
		ret = -EINVAL;
		goto out_free_req;
	}
	scmd->allowed = SG_DEFAULT_RETRIES;

	if (copy_from_user(scmd->cmnd, uptr64(cmd->request), cmd->request_len)) {
		ret = -EFAULT;
		goto out_free_req;
	}

	/* Permission check on the opcode (after the CDB is in kernel space). */
	if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) {
		ret = -EPERM;
		goto out_free_req;
	}

	pdu->response_addr = cmd->response;
	/* Cap sense to the submitter's buffer; 0 means "use the full size". */
	scmd->sense_len = cmd->max_response_len ?
		min(cmd->max_response_len, SCSI_SENSE_BUFFERSIZE) : SCSI_SENSE_BUFFERSIZE;

	if (cmd->dout_xfer_len || cmd->din_xfer_len) {
		ret = scsi_bsg_map_user_buffer(req, ioucmd, issue_flags, gfp_mask);
		if (ret)
			goto out_free_req;
		/* Remember the bio so task_work can unmap it on completion. */
		pdu->bio = req->bio;
	} else {
		pdu->bio = NULL;
	}

	req->timeout = cmd->timeout_ms ?
		msecs_to_jiffies(cmd->timeout_ms) : BLK_DEFAULT_SG_TIMEOUT;

	req->end_io = scsi_bsg_uring_cmd_done;
	req->end_io_data = ioucmd;
	pdu->req = req;

	/* Ownership of req passes to the completion path from here on. */
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}
18184

19185
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,

0 commit comments

Comments
 (0)