@@ -203,6 +203,40 @@ static struct tsync_work *tsync_works_provide(struct tsync_works *s,
 	return ctx;
 }
 
+/**
+ * tsync_works_trim - Put the last tsync_work element
+ *
+ * @s: TSYNC works to trim.
+ *
+ * Put the last task and decrement the size of @s.
+ *
+ * This helper does not cancel a running task, but just resets the last element
+ * to zero.
+ */
+static void tsync_works_trim(struct tsync_works *s)
+{
+	struct tsync_work *ctx;
+
+	if (WARN_ON_ONCE(s->size <= 0))
+		return;
+
+	ctx = s->works[s->size - 1];
+
+	/*
+	 * For consistency, remove the task from ctx so that it does not look like
+	 * we handed it a task_work.
+	 */
+	put_task_struct(ctx->task);
+	*ctx = (typeof(*ctx)){};
+
+	/*
+	 * Cancel the tsync_works_provide() change to recycle the reserved memory
+	 * for the next thread, if any. This also ensures that cancel_tsync_works()
+	 * and tsync_works_release() do not see any NULL task pointers.
+	 */
+	s->size--;
+}
+
 /*
  * tsync_works_grow_by - preallocates space for n more contexts in s
  *
@@ -256,13 +290,14 @@ static int tsync_works_grow_by(struct tsync_works *s, size_t n, gfp_t flags)
  * tsync_works_contains - checks for presence of task in s
  */
 static bool tsync_works_contains_task(const struct tsync_works *s,
-				      struct task_struct *task)
+				      const struct task_struct *task)
 {
 	size_t i;
 
 	for (i = 0; i < s->size; i++)
 		if (s->works[i]->task == task)
 			return true;
+
 	return false;
 }
 
@@ -276,14 +311,15 @@ static void tsync_works_release(struct tsync_works *s)
 	size_t i;
 
 	for (i = 0; i < s->size; i++) {
-		if (!s->works[i]->task)
+		if (WARN_ON_ONCE(!s->works[i]->task))
 			continue;
 
 		put_task_struct(s->works[i]->task);
 	}
 
 	for (i = 0; i < s->capacity; i++)
 		kfree(s->works[i]);
+
 	kfree(s->works);
 	s->works = NULL;
 	s->size = 0;
@@ -295,7 +331,7 @@ static void tsync_works_release(struct tsync_works *s)
  */
 static size_t count_additional_threads(const struct tsync_works *works)
 {
-	struct task_struct *thread, *caller;
+	const struct task_struct *caller, *thread;
 	size_t n = 0;
 
 	caller = current;
@@ -334,7 +370,8 @@ static bool schedule_task_work(struct tsync_works *works,
 			       struct tsync_shared_context *shared_ctx)
 {
 	int err;
-	struct task_struct *thread, *caller;
+	const struct task_struct *caller;
+	struct task_struct *thread;
 	struct tsync_work *ctx;
 	bool found_more_threads = false;
 
@@ -379,16 +416,14 @@ static bool schedule_task_work(struct tsync_works *works,
 
 		init_task_work(&ctx->work, restrict_one_thread_callback);
 		err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
-		if (err) {
+		if (unlikely(err)) {
 			/*
 			 * task_work_add() only fails if the task is about to exit. We
 			 * checked that earlier, but it can happen as a race. Resume
 			 * without setting an error, as the task is probably gone in the
-			 * next loop iteration. For consistency, remove the task from ctx
-			 * so that it does not look like we handed it a task_work.
+			 * next loop iteration.
 			 */
-			put_task_struct(ctx->task);
-			ctx->task = NULL;
+			tsync_works_trim(works);
 
 			atomic_dec(&shared_ctx->num_preparing);
 			atomic_dec(&shared_ctx->num_unfinished);
@@ -406,12 +441,15 @@ static bool schedule_task_work(struct tsync_works *works,
  * shared_ctx->num_preparing and shared_ctx->num_unfished and mark the two
  * completions if needed, as if the task was never scheduled.
  */
-static void cancel_tsync_works(struct tsync_works *works,
+static void cancel_tsync_works(const struct tsync_works *works,
 			       struct tsync_shared_context *shared_ctx)
 {
-	int i;
+	size_t i;
 
 	for (i = 0; i < works->size; i++) {
+		if (WARN_ON_ONCE(!works->works[i]->task))
+			continue;
+
 		if (!task_work_cancel(works->works[i]->task,
 				      &works->works[i]->work))
 			continue;
@@ -447,6 +485,16 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	shared_ctx.new_cred = new_cred;
 	shared_ctx.set_no_new_privs = task_no_new_privs(current);
 
+	/*
+	 * Serialize concurrent TSYNC operations to prevent deadlocks when
+	 * multiple threads call landlock_restrict_self() simultaneously.
+	 * If the lock is already held, we gracefully yield by restarting the
+	 * syscall. This allows the current thread to process pending
+	 * task_works before retrying.
+	 */
+	if (!down_write_trylock(&current->signal->exec_update_lock))
+		return restart_syscall();
+
 	/*
 	 * We schedule a pseudo-signal task_work for each of the calling task's
 	 * sibling threads. In the task work, each thread:
@@ -527,24 +575,30 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 					       -ERESTARTNOINTR);
 
 				/*
-				 * Cancel task works for tasks that did not start running yet,
-				 * and decrement all_prepared and num_unfinished accordingly.
+				 * Opportunistic improvement: try to cancel task
+				 * works for tasks that did not start running
+				 * yet. We do not have a guarantee that it
+				 * cancels any of the enqueued task works
+				 * because task_work_run() might already have
+				 * dequeued them.
 				 */
 				cancel_tsync_works(&works, &shared_ctx);
 
 				/*
-				 * The remaining task works have started running, so waiting for
-				 * their completion will finish.
+				 * Break the loop with error. The cleanup code
+				 * after the loop unblocks the remaining
+				 * task_works.
 				 */
-				wait_for_completion(&shared_ctx.all_prepared);
+				break;
 			}
 		}
 	} while (found_more_threads &&
 		 !atomic_read(&shared_ctx.preparation_error));
 
 	/*
-	 * We now have all sibling threads blocking and in "prepared" state in the
-	 * task work. Ask all threads to commit.
+	 * We now have either (a) all sibling threads blocking and in "prepared"
+	 * state in the task work, or (b) the preparation error is set. Ask all
+	 * threads to commit (or abort).
 	 */
 	complete_all(&shared_ctx.ready_to_commit);
 
@@ -556,6 +610,6 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	wait_for_completion(&shared_ctx.all_finished);
 
 	tsync_works_release(&works);
-
+	up_write(&current->signal->exec_update_lock);
 	return atomic_read(&shared_ctx.preparation_error);
 }
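
The locking hunk in landlock_restrict_sibling_threads() relies on a common kernel idiom: take the rw_semaphore with a trylock and, on contention, return through restart_syscall() so the thread exits to user space, handles pending task_works and signals, and transparently retries the syscall, instead of sleeping on the lock while sibling threads may be queued with task_works. A minimal stand-alone sketch of that idiom follows; the function name example_serialized_op() is illustrative only and not part of the patch.

#include <linux/rwsem.h>
#include <linux/sched/signal.h>

static int example_serialized_op(void)
{
	/*
	 * Contended case: do not block on the lock. restart_syscall() sets
	 * TIF_SIGPENDING and returns -ERESTARTNOINTR, so the caller goes back
	 * to user space, runs pending task_works and signal handling, and the
	 * syscall is restarted transparently.
	 */
	if (!down_write_trylock(&current->signal->exec_update_lock))
		return restart_syscall();

	/* The serialized work would happen here. */

	up_write(&current->signal->exec_update_lock);
	return 0;
}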
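
The caller's wait loop and the completions used above form a two-phase rendezvous built from atomic counters and completions: each scheduled task_work reports "prepared", blocks until the caller fires ready_to_commit, then reports "finished" so the caller can return. The stand-alone sketch below only illustrates that counter-plus-completion pattern; the struct layout and the participant body are assumptions inferred from the field names in the diff, not the patch's actual restrict_one_thread_callback().

#include <linux/atomic.h>
#include <linux/completion.h>

/* Names mirror the diff; the layout itself is assumed for illustration. */
struct tsync_rendezvous {
	atomic_t num_preparing;			/* threads still preparing */
	atomic_t num_unfinished;		/* threads not yet finished */
	struct completion all_prepared;		/* last preparing thread fires this */
	struct completion ready_to_commit;	/* caller fires this once */
	struct completion all_finished;		/* last finishing thread fires this */
};

/* What each scheduled task_work could do to take part in the rendezvous. */
static void tsync_rendezvous_participant(struct tsync_rendezvous *r)
{
	/* Phase 1: report "prepared"; the last thread wakes the caller. */
	if (atomic_dec_and_test(&r->num_preparing))
		complete_all(&r->all_prepared);

	/* Block until the caller asks everyone to commit or abort. */
	wait_for_completion(&r->ready_to_commit);

	/* Phase 2: commit or abort, then report "finished". */
	if (atomic_dec_and_test(&r->num_unfinished))
		complete_all(&r->all_finished);
}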