Skip to content

Commit bf63e94

Browse files
writeback: avoid use-after-free after removing device
jira VULN-4108
cve CVE-2024-0562
commit-author Khazhismel Kumykov <[email protected]>
commit f87904c

When a disk is removed, bdi_unregister gets called to stop further writeback and wait for associated delayed work to complete. However, wb_inode_writeback_end() may schedule bandwidth estimation dwork after this has completed, which can result in the timer attempting to access the just freed bdi_writeback.

Fix this by checking if the bdi_writeback is alive, similar to when scheduling writeback work. Since this requires wb->work_lock, and wb_inode_writeback_end() may get called from interrupt, switch wb->work_lock to an irqsafe lock.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 45a2966 ("writeback: fix bandwidth estimate for spiky workload")
Signed-off-by: Khazhismel Kumykov <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Cc: Michael Stapelberg <[email protected]>
Cc: Wu Fengguang <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit f87904c)
Signed-off-by: Pratham Patel <[email protected]>
1 parent 7a44a17 commit bf63e94

File tree

3 files changed

+16
-12
lines changed

3 files changed

+16
-12
lines changed

fs/fs-writeback.c

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -151,10 +151,10 @@ static void inode_io_list_del_locked(struct inode *inode,
151151

152152
static void wb_wakeup(struct bdi_writeback *wb)
153153
{
154-
spin_lock_bh(&wb->work_lock);
154+
spin_lock_irq(&wb->work_lock);
155155
if (test_bit(WB_registered, &wb->state))
156156
mod_delayed_work(bdi_wq, &wb->dwork, 0);
157-
spin_unlock_bh(&wb->work_lock);
157+
spin_unlock_irq(&wb->work_lock);
158158
}
159159

160160
static void finish_writeback_work(struct bdi_writeback *wb,
@@ -181,15 +181,15 @@ static void wb_queue_work(struct bdi_writeback *wb,
181181
if (work->done)
182182
atomic_inc(&work->done->cnt);
183183

184-
spin_lock_bh(&wb->work_lock);
184+
spin_lock_irq(&wb->work_lock);
185185

186186
if (test_bit(WB_registered, &wb->state)) {
187187
list_add_tail(&work->list, &wb->work_list);
188188
mod_delayed_work(bdi_wq, &wb->dwork, 0);
189189
} else
190190
finish_writeback_work(wb, work);
191191

192-
spin_unlock_bh(&wb->work_lock);
192+
spin_unlock_irq(&wb->work_lock);
193193
}
194194

195195
/**
@@ -1938,13 +1938,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
19381938
{
19391939
struct wb_writeback_work *work = NULL;
19401940

1941-
spin_lock_bh(&wb->work_lock);
1941+
spin_lock_irq(&wb->work_lock);
19421942
if (!list_empty(&wb->work_list)) {
19431943
work = list_entry(wb->work_list.next,
19441944
struct wb_writeback_work, list);
19451945
list_del_init(&work->list);
19461946
}
1947-
spin_unlock_bh(&wb->work_lock);
1947+
spin_unlock_irq(&wb->work_lock);
19481948
return work;
19491949
}
19501950

mm/backing-dev.c

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -280,10 +280,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
280280
unsigned long timeout;
281281

282282
timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
283-
spin_lock_bh(&wb->work_lock);
283+
spin_lock_irq(&wb->work_lock);
284284
if (test_bit(WB_registered, &wb->state))
285285
queue_delayed_work(bdi_wq, &wb->dwork, timeout);
286-
spin_unlock_bh(&wb->work_lock);
286+
spin_unlock_irq(&wb->work_lock);
287287
}
288288

289289
static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -376,12 +376,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
376376
static void wb_shutdown(struct bdi_writeback *wb)
377377
{
378378
/* Make sure nobody queues further work */
379-
spin_lock_bh(&wb->work_lock);
379+
spin_lock_irq(&wb->work_lock);
380380
if (!test_and_clear_bit(WB_registered, &wb->state)) {
381-
spin_unlock_bh(&wb->work_lock);
381+
spin_unlock_irq(&wb->work_lock);
382382
return;
383383
}
384-
spin_unlock_bh(&wb->work_lock);
384+
spin_unlock_irq(&wb->work_lock);
385385

386386
cgwb_remove_from_bdi_list(wb);
387387
/*

mm/page-writeback.c

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2754,6 +2754,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
27542754

27552755
static void wb_inode_writeback_end(struct bdi_writeback *wb)
27562756
{
2757+
unsigned long flags;
27572758
atomic_dec(&wb->writeback_inodes);
27582759
/*
27592760
* Make sure estimate of writeback throughput gets updated after
@@ -2762,7 +2763,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
27622763
* that if multiple inodes end writeback at a similar time, they get
27632764
* batched into one bandwidth update.
27642765
*/
2765-
queue_delayed_work(bdi_wq, wb->bw_dwork, BANDWIDTH_INTERVAL);
2766+
spin_lock_irqsave(&wb->work_lock, flags);
2767+
if (test_bit(WB_registered, &wb->state))
2768+
queue_delayed_work(bdi_wq, wb->bw_dwork, BANDWIDTH_INTERVAL);
2769+
spin_unlock_irqrestore(&wb->work_lock, flags);
27662770
}
27672771

27682772
int test_clear_page_writeback(struct page *page)

0 commit comments

Comments (0)