@@ -17,6 +17,7 @@ struct io_timeout {
 	struct file			*file;
 	u32				off;
 	u32				target_seq;
+	u32				repeats;
 	struct list_head		list;
 	/* head of the link, used by linked timeouts only */
 	struct io_kiocb			*head;
@@ -37,8 +38,9 @@ struct io_timeout_rem {
 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
 {
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+	struct io_timeout_data *data = req->async_data;
 
-	return !timeout->off;
+	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
 }
 
 static inline void io_put_req(struct io_kiocb *req)
@@ -49,6 +51,44 @@ static inline void io_put_req(struct io_kiocb *req)
 	}
 }
 
+static inline bool io_timeout_finish(struct io_timeout *timeout,
+				     struct io_timeout_data *data)
+{
+	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
+		return true;
+
+	if (!timeout->off || (timeout->repeats && --timeout->repeats))
+		return false;
+
+	return true;
+}
+
+static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
+
+static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
+{
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+	struct io_timeout_data *data = req->async_data;
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!io_timeout_finish(timeout, data)) {
+		bool filled;
+		filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
+				    IORING_CQE_F_MORE, false);
+		if (filled) {
+			/* re-arm timer */
+			spin_lock_irq(&ctx->timeout_lock);
+			list_add(&timeout->list, ctx->timeout_list.prev);
+			data->timer.function = io_timeout_fn;
+			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
+			spin_unlock_irq(&ctx->timeout_lock);
+			return;
+		}
+	}
+
+	io_req_task_complete(req, ts);
+}
+
 static bool io_kill_timeout(struct io_kiocb *req, int status)
 	__must_hold(&req->ctx->timeout_lock)
 {
@@ -212,7 +252,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		req_set_fail(req);
 
 	io_req_set_res(req, -ETIME, 0);
-	req->io_task_work.func = io_req_task_complete;
+	req->io_task_work.func = io_timeout_complete;
 	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -470,16 +510,27 @@ static int __io_timeout_prep(struct io_kiocb *req,
 		return -EINVAL;
 	flags = READ_ONCE(sqe->timeout_flags);
 	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
-		      IORING_TIMEOUT_ETIME_SUCCESS))
+		      IORING_TIMEOUT_ETIME_SUCCESS |
+		      IORING_TIMEOUT_MULTISHOT))
 		return -EINVAL;
 	/* more than one clock specified is invalid, obviously */
 	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
 		return -EINVAL;
+	/* multishot requests only make sense with rel values */
+	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
+		return -EINVAL;
 
 	INIT_LIST_HEAD(&timeout->list);
 	timeout->off = off;
 	if (unlikely(off && !req->ctx->off_timeout_used))
 		req->ctx->off_timeout_used = true;
+	/*
+	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
+	 * remaining nr
+	 */
+	timeout->repeats = 0;
+	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
+		timeout->repeats = off;
 
 	if (WARN_ON_ONCE(req_has_async_data(req)))
 		return -EFAULT;
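
For reference, below is a minimal userspace sketch of how a multishot timeout added by this patch might be armed and consumed via liburing. It assumes a kernel and liburing build recent enough to expose IORING_TIMEOUT_MULTISHOT in their headers; the 500ms period and repeat count of 5 are illustrative choices, not values taken from the patch.

/*
 * Hedged sketch, not part of the patch: arm a relative multishot timeout
 * that fires every 500ms, five times. Assumes IORING_TIMEOUT_MULTISHOT is
 * defined in the installed uapi/liburing headers.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 500000000 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* the count argument (sqe->off) doubles as the repeat count here */
	io_uring_prep_timeout(sqe, &ts, 5, IORING_TIMEOUT_MULTISHOT);
	io_uring_submit(&ring);

	for (;;) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		/* each firing completes with -ETIME; IORING_CQE_F_MORE is
		 * set while further firings remain */
		int more = !!(cqe->flags & IORING_CQE_F_MORE);
		printf("res=%d more=%d\n", cqe->res, more);
		io_uring_cqe_seen(&ring, cqe);
		if (!more)
			break;
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Note how the patch repurposes the existing sqe->off count field: with IORING_TIMEOUT_MULTISHOT set, a non-zero off becomes the repeat budget tracked in timeout->repeats, off == 0 means repeat indefinitely, and combining the flag with IORING_TIMEOUT_ABS is rejected in prep since re-arming an absolute deadline makes no sense.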