@@ -75,12 +75,17 @@ struct io_sr_msg {
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				buf_group;
-	bool				retry;
+	unsigned short			retry_flags;
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
 };
 
+enum sr_retry_flags {
+	IO_SR_MSG_RETRY		= 1,
+	IO_SR_MSG_PARTIAL_MAP	= 2,
+};
+
 /*
  * Number of times we'll try and do receives if there's more data. If we
  * exceed this limit, then add us to the back of the queue and retry from
@@ -187,7 +192,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 	sr->len = 0; /* get from the provided buffer */
 }
 
@@ -397,7 +402,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 	sr->len = READ_ONCE(sqe->len);
 	sr->flags = READ_ONCE(sqe->ioprio);
 	if (sr->flags & ~SENDMSG_FLAGS)
@@ -751,7 +756,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -823,7 +828,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
 				      issue_flags);
-		if (sr->retry)
+		if (sr->retry_flags & IO_SR_MSG_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
@@ -832,12 +837,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		 * If more is available AND it was a full transfer, retry and
 		 * append to this one
 		 */
-		if (!sr->retry && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
 		    !iov_iter_count(&kmsg->msg.msg_iter)) {
 			req->cqe.flags = cflags & ~CQE_F_MASK;
 			sr->len = kmsg->msg.msg_inq;
 			sr->done_io += this_ret;
-			sr->retry = true;
+			sr->retry_flags |= IO_SR_MSG_RETRY;
 			return false;
 		}
 	} else {
@@ -1077,6 +1082,14 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		if (unlikely(ret < 0))
 			return ret;
 
+		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+			kmsg->vec.nr = ret;
+			kmsg->vec.iovec = arg.iovs;
+			req->flags |= REQ_F_NEED_CLEANUP;
+		}
+		if (arg.partial_map)
+			sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
+
 		/* special case 1 vec, can be a fast path */
 		if (ret == 1) {
 			sr->buf = arg.iovs[0].iov_base;
@@ -1085,11 +1098,6 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		}
 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
 				arg.out_len);
-		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
-			kmsg->vec.nr = ret;
-			kmsg->vec.iovec = arg.iovs;
-			req->flags |= REQ_F_NEED_CLEANUP;
-		}
 	} else {
 		void __user *buf;
 
@@ -1275,7 +1283,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	int ret;
 
 	zc->done_io = 0;
-	zc->retry = false;
+	zc->retry_flags = 0;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 		return -EINVAL;
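
For orientation, here is a standalone userspace sketch (not kernel code) of the behavioural point behind the bool-to-bitmask conversion above: io_recv_finish() now tests !sr->retry_flags, so both an already-queued bundle retry and a partially mapped buffer list suppress the append/retry path, while the CQE-flag carry-over keys only on IO_SR_MSG_RETRY. The enum names mirror the patch; the demo struct, the would_retry() helper and main() are illustrative assumptions only.

#include <stdio.h>

/* mirrors the flags introduced in the patch */
enum sr_retry_flags {
	IO_SR_MSG_RETRY		= 1,
	IO_SR_MSG_PARTIAL_MAP	= 2,
};

/* stand-in for the relevant field of struct io_sr_msg */
struct demo_sr_msg {
	unsigned short retry_flags;	/* was: bool retry */
};

/*
 * Models the retry decision in io_recv_finish(): only retry/append
 * when no retry is queued AND the buffer mapping was not partial.
 */
static int would_retry(const struct demo_sr_msg *sr)
{
	return !sr->retry_flags;
}

int main(void)
{
	struct demo_sr_msg sr = { .retry_flags = 0 };

	printf("clean: would retry = %d\n", would_retry(&sr));

	/* io_recv_buf_select() marks a partial mapping */
	sr.retry_flags |= IO_SR_MSG_PARTIAL_MAP;
	printf("partial map: would retry = %d\n", would_retry(&sr));

	/* an active bundle retry also blocks a further append */
	sr.retry_flags |= IO_SR_MSG_RETRY;
	printf("retry queued: would retry = %d\n", would_retry(&sr));
	return 0;
}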