@@ -75,15 +75,21 @@ struct io_sr_msg {
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				buf_group;
-	unsigned short			retry_flags;
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
 };
 
+/*
+ * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
+ * anyway. Use the upper 8 bits for internal uses.
+ */
 enum sr_retry_flags {
-	IO_SR_MSG_RETRY		= 1,
-	IO_SR_MSG_PARTIAL_MAP	= 2,
+	IORING_RECV_RETRY	= (1U << 15),
+	IORING_RECV_PARTIAL_MAP	= (1U << 14),
+
+	IORING_RECV_RETRY_CLEAR	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
+	IORING_RECV_NO_RETRY	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
 };
 
 /*
@@ -192,7 +198,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
-	sr->retry_flags = 0;
+	sr->flags &= ~IORING_RECV_RETRY_CLEAR;
 	sr->len = 0;		/* get from the provided buffer */
 }
 
@@ -402,7 +408,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry_flags = 0;
 	sr->len = READ_ONCE(sqe->len);
 	sr->flags = READ_ONCE(sqe->ioprio);
 	if (sr->flags & ~SENDMSG_FLAGS)
@@ -756,7 +761,6 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry_flags = 0;
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -828,7 +832,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
 				       issue_flags);
-		if (sr->retry_flags & IO_SR_MSG_RETRY)
+		if (sr->flags & IORING_RECV_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
@@ -837,12 +841,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		 * If more is available AND it was a full transfer, retry and
 		 * append to this one
 		 */
-		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+		if (!(sr->flags & IORING_RECV_NO_RETRY) &&
+		    kmsg->msg.msg_inq > 1 && this_ret > 0 &&
 		    !iov_iter_count(&kmsg->msg.msg_iter)) {
 			req->cqe.flags = cflags & ~CQE_F_MASK;
 			sr->len = kmsg->msg.msg_inq;
 			sr->done_io += this_ret;
-			sr->retry_flags |= IO_SR_MSG_RETRY;
+			sr->flags |= IORING_RECV_RETRY;
 			return false;
 		}
 	} else {
@@ -1088,7 +1093,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 			req->flags |= REQ_F_NEED_CLEANUP;
 		}
 		if (arg.partial_map)
-			sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
+			sr->flags |= IORING_RECV_PARTIAL_MAP;
 
 		/* special case 1 vec, can be a fast path */
 		if (ret == 1) {
@@ -1283,7 +1288,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	int ret;
 
 	zc->done_io = 0;
-	zc->retry_flags = 0;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 		return -EINVAL;
0 commit comments