
Commit bd408c3

jankara authored and gregkh committed
writeback: Avoid softlockup when switching many inodes
[ Upstream commit 66c14dc ]

process_inode_switch_wbs_work() can be switching over 100 inodes to a different cgroup. Since switching an inode requires counting all dirty & under-writeback pages in the address space of each inode, this can take a significant amount of time. Add a possibility to reschedule after processing each inode to avoid softlockups.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 4bdabd5 commit bd408c3

File tree

1 file changed (+10 −1 lines)

fs/fs-writeback.c

Lines changed: 10 additions & 1 deletion
@@ -503,6 +503,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 */
 	down_read(&bdi->wb_switch_rwsem);
 
+	inodep = isw->inodes;
 	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
@@ -513,6 +514,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * gives us exclusion against all wb related operations on @inode
 	 * including IO list manipulations and stat updates.
 	 */
+relock:
 	if (old_wb < new_wb) {
 		spin_lock(&old_wb->list_lock);
 		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -521,10 +523,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 	}
 
-	for (inodep = isw->inodes; *inodep; inodep++) {
+	while (*inodep) {
 		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 			nr_switched++;
+		inodep++;
+		if (*inodep && need_resched()) {
+			spin_unlock(&new_wb->list_lock);
+			spin_unlock(&old_wb->list_lock);
+			cond_resched();
+			goto relock;
+		}
 	}
 
 	spin_unlock(&new_wb->list_lock);
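
Note on the pattern: the patch hoists the iterator out of the for-loop so that progress survives a lock drop, and whenever need_resched() fires between inodes it releases both list_locks, yields with cond_resched(), and jumps back to the relock label to re-take the locks in the same ordered fashion. Below is a minimal, self-contained sketch of that drop-locks/reschedule/relock idiom; struct item, handle_one_item(), and the two generic spinlocks are hypothetical placeholders for illustration, not code from this patch.

/*
 * Sketch only (hypothetical names): walk a NULL-terminated array of items
 * under two spinlocks, and whenever the scheduler wants the CPU back
 * between items, drop both locks, cond_resched(), then relock and resume.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

struct item;
void handle_one_item(struct item *it);	/* hypothetical per-item work */

static void process_items(struct item **items, spinlock_t *a, spinlock_t *b)
{
	struct item **itemp = items;	/* iterator lives outside the loop */

relock:
	spin_lock(a);
	spin_lock(b);
	while (*itemp) {
		handle_one_item(*itemp);
		itemp++;
		/* Only drop and re-take the locks if there is more work left. */
		if (*itemp && need_resched()) {
			spin_unlock(b);
			spin_unlock(a);
			cond_resched();
			goto relock;
		}
	}
	spin_unlock(b);
	spin_unlock(a);
}

As in the patch, the check tests the next element before need_resched(), so the locks are never dropped and re-taken pointlessly after the last item has been processed.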
