Skip to content

Commit 6d3563a

Browse files
jankara
authored and gregkh committed
writeback: Avoid softlockup when switching many inodes
[ Upstream commit 66c14dccd810d42ec5c73bb8a9177489dfd62278 ] process_inode_switch_wbs_work() can be switching over 100 inodes to a different cgroup. Since switching an inode requires counting all dirty & under-writeback pages in the address space of each inode, this can take a significant amount of time. Add a possibility to reschedule after processing each inode to avoid softlockups. Acked-by: Tejun Heo <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Christian Brauner <[email protected]> Signed-off-by: Sasha Levin <[email protected]>
1 parent 7381cd1 commit 6d3563a

1 file changed

Lines changed: 10 additions & 1 deletion

File tree

fs/fs-writeback.c

Lines changed: 10 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -502,6 +502,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
502502
*/
503503
down_read(&bdi->wb_switch_rwsem);
504504

505+
inodep = isw->inodes;
505506
/*
506507
* By the time control reaches here, RCU grace period has passed
507508
* since I_WB_SWITCH assertion and all wb stat update transactions
@@ -512,6 +513,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
512513
* gives us exclusion against all wb related operations on @inode
513514
* including IO list manipulations and stat updates.
514515
*/
516+
relock:
515517
if (old_wb < new_wb) {
516518
spin_lock(&old_wb->list_lock);
517519
spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -520,10 +522,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
520522
spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
521523
}
522524

523-
for (inodep = isw->inodes; *inodep; inodep++) {
525+
while (*inodep) {
524526
WARN_ON_ONCE((*inodep)->i_wb != old_wb);
525527
if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
526528
nr_switched++;
529+
inodep++;
530+
if (*inodep && need_resched()) {
531+
spin_unlock(&new_wb->list_lock);
532+
spin_unlock(&old_wb->list_lock);
533+
cond_resched();
534+
goto relock;
535+
}
527536
}
528537

529538
spin_unlock(&new_wb->list_lock);

0 commit comments

Comments (0)