Skip to content

Commit 232ed3c

Browse files
committed
Merge branch 'for-6.17/io_uring' into for-next
* for-6.17/io_uring:
  io_uring/net: allow multishot receive per-invocation cap
  io_uring/net: move io_sr_msg->retry_flags to io_sr_msg->flags
  io_uring/net: use passed in 'len' in io_recv_buf_select()
2 parents f4ca523 + 6a8afb9 commit 232ed3c

1 file changed

Lines changed: 32 additions & 17 deletions

File tree

io_uring/net.c

Lines changed: 32 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -75,15 +75,24 @@ struct io_sr_msg {
7575
u16 flags;
7676
/* initialised and used only by !msg send variants */
7777
u16 buf_group;
78-
unsigned short retry_flags;
78+
unsigned mshot_len;
7979
void __user *msg_control;
8080
/* used only for send zerocopy */
8181
struct io_kiocb *notif;
8282
};
8383

84+
/*
85+
* The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
86+
* anyway. Use the upper 8 bits for internal uses.
87+
*/
8488
enum sr_retry_flags {
85-
IO_SR_MSG_RETRY = 1,
86-
IO_SR_MSG_PARTIAL_MAP = 2,
89+
IORING_RECV_RETRY = (1U << 15),
90+
IORING_RECV_PARTIAL_MAP = (1U << 14),
91+
IORING_RECV_MSHOT_CAP = (1U << 13),
92+
93+
IORING_RECV_RETRY_CLEAR = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
94+
IORING_RECV_NO_RETRY = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP |
95+
IORING_RECV_MSHOT_CAP,
8796
};
8897

8998
/*
@@ -192,8 +201,8 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
192201

193202
req->flags &= ~REQ_F_BL_EMPTY;
194203
sr->done_io = 0;
195-
sr->retry_flags = 0;
196-
sr->len = 0; /* get from the provided buffer */
204+
sr->flags &= ~IORING_RECV_RETRY_CLEAR;
205+
sr->len = sr->mshot_len;
197206
}
198207

199208
static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
@@ -402,7 +411,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
402411
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
403412

404413
sr->done_io = 0;
405-
sr->retry_flags = 0;
406414
sr->len = READ_ONCE(sqe->len);
407415
sr->flags = READ_ONCE(sqe->ioprio);
408416
if (sr->flags & ~SENDMSG_FLAGS)
@@ -756,7 +764,6 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
756764
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
757765

758766
sr->done_io = 0;
759-
sr->retry_flags = 0;
760767

761768
if (unlikely(sqe->file_index || sqe->addr2))
762769
return -EINVAL;
@@ -783,13 +790,14 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
783790
sr->buf_group = req->buf_index;
784791
req->buf_list = NULL;
785792
}
793+
sr->mshot_len = 0;
786794
if (sr->flags & IORING_RECV_MULTISHOT) {
787795
if (!(req->flags & REQ_F_BUFFER_SELECT))
788796
return -EINVAL;
789797
if (sr->msg_flags & MSG_WAITALL)
790798
return -EINVAL;
791-
if (req->opcode == IORING_OP_RECV && sr->len)
792-
return -EINVAL;
799+
if (req->opcode == IORING_OP_RECV)
800+
sr->mshot_len = sr->len;
793801
req->flags |= REQ_F_APOLL_MULTISHOT;
794802
}
795803
if (sr->flags & IORING_RECVSEND_BUNDLE) {
@@ -828,21 +836,24 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
828836

829837
cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
830838
issue_flags);
831-
if (sr->retry_flags & IO_SR_MSG_RETRY)
839+
if (sr->flags & IORING_RECV_RETRY)
832840
cflags = req->cqe.flags | (cflags & CQE_F_MASK);
841+
if (sr->mshot_len && *ret >= sr->mshot_len)
842+
sr->flags |= IORING_RECV_MSHOT_CAP;
833843
/* bundle with no more immediate buffers, we're done */
834844
if (req->flags & REQ_F_BL_EMPTY)
835845
goto finish;
836846
/*
837847
* If more is available AND it was a full transfer, retry and
838848
* append to this one
839849
*/
840-
if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
850+
if (!(sr->flags & IORING_RECV_NO_RETRY) &&
851+
kmsg->msg.msg_inq > 1 && this_ret > 0 &&
841852
!iov_iter_count(&kmsg->msg.msg_iter)) {
842853
req->cqe.flags = cflags & ~CQE_F_MASK;
843854
sr->len = kmsg->msg.msg_inq;
844855
sr->done_io += this_ret;
845-
sr->retry_flags |= IO_SR_MSG_RETRY;
856+
sr->flags |= IORING_RECV_RETRY;
846857
return false;
847858
}
848859
} else {
@@ -859,10 +870,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
859870
io_mshot_prep_retry(req, kmsg);
860871
/* Known not-empty or unknown state, retry */
861872
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
862-
if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
873+
if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
874+
!(sr->flags & IORING_RECV_MSHOT_CAP)) {
863875
return false;
876+
}
864877
/* mshot retries exceeded, force a requeue */
865878
sr->nr_multishot_loops = 0;
879+
sr->flags &= ~IORING_RECV_MSHOT_CAP;
866880
if (issue_flags & IO_URING_F_MULTISHOT)
867881
*ret = IOU_REQUEUE;
868882
}
@@ -1075,8 +1089,10 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
10751089
arg.mode |= KBUF_MODE_FREE;
10761090
}
10771091

1078-
if (kmsg->msg.msg_inq > 1)
1079-
arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1092+
if (*len)
1093+
arg.max_len = *len;
1094+
else if (kmsg->msg.msg_inq > 1)
1095+
arg.max_len = min_not_zero(*len, kmsg->msg.msg_inq);
10801096

10811097
ret = io_buffers_peek(req, &arg);
10821098
if (unlikely(ret < 0))
@@ -1088,7 +1104,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
10881104
req->flags |= REQ_F_NEED_CLEANUP;
10891105
}
10901106
if (arg.partial_map)
1091-
sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
1107+
sr->flags |= IORING_RECV_PARTIAL_MAP;
10921108

10931109
/* special case 1 vec, can be a fast path */
10941110
if (ret == 1) {
@@ -1283,7 +1299,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
12831299
int ret;
12841300

12851301
zc->done_io = 0;
1286-
zc->retry_flags = 0;
12871302

12881303
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
12891304
return -EINVAL;

0 commit comments

Comments (0)