@@ -17,6 +17,8 @@
 #include <linux/kthread.h>
 #include <linux/sched/mm.h>
 
+static bool __should_discard_bucket(struct journal *, struct journal_device *);
+
 /* Free space calculations: */
 
 static unsigned journal_space_from(struct journal_device *ja,
@@ -203,8 +205,7 @@ void bch2_journal_space_available(struct journal *j)
 		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
 			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
 
-		if (ja->discard_idx != ja->dirty_idx_ondisk)
-			can_discard = true;
+		can_discard |= __should_discard_bucket(j, ja);
 
 		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
 		nr_online++;
@@ -264,13 +265,19 @@ void bch2_journal_space_available(struct journal *j)
 
 /* Discards - last part of journal reclaim: */
 
-static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+static bool __should_discard_bucket(struct journal *j, struct journal_device *ja)
 {
-	spin_lock(&j->lock);
 	unsigned min_free = max(4, ja->nr / 8);
 
-	bool ret = bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) < min_free &&
+	return bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) <
+		min_free &&
 		ja->discard_idx != ja->dirty_idx_ondisk;
+}
+
+static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+{
+	spin_lock(&j->lock);
+	bool ret = __should_discard_bucket(j, ja);
 	spin_unlock(&j->lock);
 
 	return ret;
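For readers unfamiliar with the `__`-prefixed naming convention used here: the lockless __should_discard_bucket() now carries the actual check and expects the caller to already hold j->lock, while the plain-named should_discard_bucket() wrapper takes and drops the lock around it. Below is a minimal, self-contained sketch of that split; struct demo_journal, demo_should_discard(), and the free-bucket arithmetic are hypothetical stand-ins for illustration, not bcachefs code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the real struct journal / journal_device state. */
struct demo_journal {
	pthread_mutex_t lock;
	unsigned nr;               /* journal buckets on this device */
	unsigned discarded;        /* buckets already discarded */
	unsigned discard_idx;
	unsigned dirty_idx_ondisk;
};

/*
 * Lockless check: assumes the caller already holds j->lock, so code that is
 * already inside the lock can reuse it directly (mirrors
 * __should_discard_bucket() in the patch).
 */
static bool __demo_should_discard(struct demo_journal *j)
{
	unsigned min_free = j->nr / 8 > 4 ? j->nr / 8 : 4;

	/* Stand-in for bch2_journal_dev_buckets_available(). */
	return j->nr - j->discarded < min_free &&
		j->discard_idx != j->dirty_idx_ondisk;
}

/* Locked wrapper for callers that do not hold the lock themselves. */
static bool demo_should_discard(struct demo_journal *j)
{
	pthread_mutex_lock(&j->lock);
	bool ret = __demo_should_discard(j);
	pthread_mutex_unlock(&j->lock);

	return ret;
}

int main(void)
{
	struct demo_journal j = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr = 32, .discarded = 30,
		.discard_idx = 1, .dirty_idx_ondisk = 5,
	};

	printf("should discard: %d\n", demo_should_discard(&j));
	return 0;
}

The first hunk then lets bch2_journal_space_available() reuse the same condition via the lockless variant instead of duplicating the check inline, which implies that function already runs with j->lock held; external callers keep the locked entry point.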