@@ -56,11 +56,9 @@
 #define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
 #define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)
 
-/* Target completion latency of discarding all discardable extents */
-#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
 #define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
 #define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
-#define BTRFS_DISCARD_MAX_IOPS		(10U)
+#define BTRFS_DISCARD_MAX_IOPS		(1000U)
 
 /* Monotonically decreasing minimum length filters after index 0 */
 static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
@@ -577,6 +575,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
 	s32 discardable_extents;
 	s64 discardable_bytes;
 	u32 iops_limit;
+	unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
 	unsigned long delay;
 
 	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
@@ -607,13 +606,19 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
 	}
 
 	iops_limit = READ_ONCE(discard_ctl->iops_limit);
-	if (iops_limit)
+
+	if (iops_limit) {
 		delay = MSEC_PER_SEC / iops_limit;
-	else
-		delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
+	} else {
+		/*
+		 * Unset iops_limit means go as fast as possible, so allow a
+		 * delay of 0.
+		 */
+		delay = 0;
+		min_delay = 0;
+	}
 
-	delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
-		      BTRFS_DISCARD_MAX_DELAY_MSEC);
+	delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
 	discard_ctl->delay_ms = delay;
 
 	spin_unlock(&discard_ctl->lock);
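For illustration, below is a minimal userspace sketch of the delay logic as it stands after this patch. The constants mirror the patched defines, but calc_delay() and the CLAMP macro are hypothetical stand-ins for the kernel's btrfs_discard_calc_delay() and clamp(), written here so the behavior can be compiled and checked outside the kernel:

#include <stdio.h>

#define MSEC_PER_SEC                  1000UL
#define BTRFS_DISCARD_MIN_DELAY_MSEC  (1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC  (1000UL)

/* Userspace stand-in for the kernel's clamp() helper. */
#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Hypothetical model of the patched delay calculation. */
static unsigned long calc_delay(unsigned int iops_limit)
{
	unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
	unsigned long delay;

	if (iops_limit) {
		/* Explicit limit: spread discards to roughly iops_limit/sec. */
		delay = MSEC_PER_SEC / iops_limit;
	} else {
		/* Unset limit means go as fast as possible: allow zero delay. */
		delay = 0;
		min_delay = 0;
	}

	return CLAMP(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
}

int main(void)
{
	/* 10 iops -> 100 ms, 1000 iops -> 1 ms, 0 (unset) -> 0 ms */
	printf("%lu %lu %lu\n", calc_delay(10), calc_delay(1000), calc_delay(0));
	return 0;
}

Note that any explicit iops_limit still clamps the delay up to at least BTRFS_DISCARD_MIN_DELAY_MSEC (e.g. a limit of 2000 computes 1000/2000 = 0 and is clamped to 1 ms); only an unset (zero) limit drops min_delay to 0 so discards can be issued back to back.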