Skip to content

Commit b50b4c4

Browse files
Dave Chinner authored and Chandan Babu R committed
xfs: background AIL push should target physical space
Currently the AIL attempts to keep 25% of the "log space" free, where the current used space is tracked by the reserve grant head. That is, it tracks both physical space used plus the amount reserved by transactions in progress. When we start tail pushing, we are trying to make space for new reservations by writing back older metadata and the log is generally physically full of dirty metadata, and reservations for modifications in flight take up whatever space the AIL can physically free up. Hence we don't really need to take into account the reservation space that has been used - we just need to keep the log tail moving as fast as we can to free up space for more reservations to be made. We know exactly how much physical space the journal is consuming in the AIL (i.e. max LSN - min LSN) so we can base push thresholds directly on this state rather than have to look at grant head reservations to determine how much to physically push out of the log. This also allows code that needs to know if log items in the current transaction need to be pushed or re-logged to simply sample the current target - they don't need to calculate the current target themselves. This avoids the need for any locking when doing such checks. Further, moving to a physical target means we don't need "push all until empty semantics" like were introduced in the previous patch. We can now test and clear the "push all" as a one-shot command to set the target to the current head of the AIL. This allows the xfsaild to maximise the use of log space right up to the point where conditions indicate that the xfsaild is not keeping up with load and it needs to work harder, and as soon as those constraints go away (i.e. external code no longer needs everything pushed) the xfsaild will return to maintaining the normal 25% free space thresholds. Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Darrick J. Wong <[email protected]> Signed-off-by: Chandan Babu R <[email protected]>
1 parent 9adf402 commit b50b4c4

File tree

4 files changed

+80
-67
lines changed

4 files changed

+80
-67
lines changed

fs/xfs/libxfs/xfs_defer.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -558,7 +558,7 @@ xfs_defer_relog(
558558
* the log threshold once per call.
559559
*/
560560
if (threshold_lsn == NULLCOMMITLSN) {
561-
threshold_lsn = xfs_ail_push_target(log->l_ailp);
561+
threshold_lsn = xfs_ail_get_push_target(log->l_ailp);
562562
if (threshold_lsn == NULLCOMMITLSN)
563563
break;
564564
}

fs/xfs/xfs_log_priv.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -625,6 +625,24 @@ xlog_wait(
625625
int xlog_wait_on_iclog(struct xlog_in_core *iclog)
626626
__releases(iclog->ic_log->l_icloglock);
627627

628+
/* Calculate the distance between two LSNs in bytes */
629+
static inline uint64_t
630+
xlog_lsn_sub(
631+
struct xlog *log,
632+
xfs_lsn_t high,
633+
xfs_lsn_t low)
634+
{
635+
uint32_t hi_cycle = CYCLE_LSN(high);
636+
uint32_t hi_block = BLOCK_LSN(high);
637+
uint32_t lo_cycle = CYCLE_LSN(low);
638+
uint32_t lo_block = BLOCK_LSN(low);
639+
640+
if (hi_cycle == lo_cycle)
641+
return BBTOB(hi_block - lo_block);
642+
ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log));
643+
return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block);
644+
}
645+
628646
/*
629647
* The LSN is valid so long as it is behind the current LSN. If it isn't, this
630648
* means that the next log record that includes this metadata could have a

fs/xfs/xfs_trans_ail.c

Lines changed: 58 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -398,51 +398,69 @@ xfsaild_push_item(
398398
/*
399399
* Compute the LSN that we'd need to push the log tail towards in order to have
400400
* at least 25% of the log space free. If the log free space already meets this
401-
* threshold, this function returns NULLCOMMITLSN.
401+
* threshold, this function returns the lowest LSN in the AIL to slowly keep
402+
* writeback ticking over and the tail of the log moving forward.
402403
*/
403-
xfs_lsn_t
404-
__xfs_ail_push_target(
404+
static xfs_lsn_t
405+
xfs_ail_calc_push_target(
405406
struct xfs_ail *ailp)
406407
{
407-
struct xlog *log = ailp->ail_log;
408-
xfs_lsn_t threshold_lsn = 0;
409-
xfs_lsn_t last_sync_lsn;
410-
int free_blocks;
411-
int free_bytes;
412-
int threshold_block;
413-
int threshold_cycle;
414-
int free_threshold;
415-
416-
free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
417-
free_blocks = BTOBBT(free_bytes);
408+
struct xlog *log = ailp->ail_log;
409+
struct xfs_log_item *lip;
410+
xfs_lsn_t target_lsn;
411+
xfs_lsn_t max_lsn;
412+
xfs_lsn_t min_lsn;
413+
int32_t free_bytes;
414+
uint32_t target_block;
415+
uint32_t target_cycle;
416+
417+
lockdep_assert_held(&ailp->ail_lock);
418+
419+
lip = xfs_ail_max(ailp);
420+
if (!lip)
421+
return NULLCOMMITLSN;
422+
423+
max_lsn = lip->li_lsn;
424+
min_lsn = __xfs_ail_min_lsn(ailp);
418425

419426
/*
420-
* The threshold for the minimum number of free blocks is one quarter of
421-
* the entire log space.
427+
* If we are supposed to push all the items in the AIL, we want to push
428+
* to the current head. We then clear the push flag so that we don't
429+
* keep pushing newly queued items beyond where the push all command was
430+
* run. If the push waiter wants to empty the ail, it should queue
431+
* itself on the ail_empty wait queue.
422432
*/
423-
free_threshold = log->l_logBBsize >> 2;
424-
if (free_blocks >= free_threshold)
425-
return NULLCOMMITLSN;
433+
if (test_and_clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
434+
return max_lsn;
435+
436+
/* If someone wants the AIL empty, keep pushing everything we have. */
437+
if (waitqueue_active(&ailp->ail_empty))
438+
return max_lsn;
426439

427-
xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
428-
&threshold_block);
429-
threshold_block += free_threshold;
430-
if (threshold_block >= log->l_logBBsize) {
431-
threshold_block -= log->l_logBBsize;
432-
threshold_cycle += 1;
433-
}
434-
threshold_lsn = xlog_assign_lsn(threshold_cycle,
435-
threshold_block);
436440
/*
437-
* Don't pass in an lsn greater than the lsn of the last
438-
* log record known to be on disk. Use a snapshot of the last sync lsn
439-
* so that it doesn't change between the compare and the set.
441+
* Background pushing - attempt to keep 25% of the log free and if we
442+
* have that much free retain the existing target.
440443
*/
441-
last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
442-
if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
443-
threshold_lsn = last_sync_lsn;
444+
free_bytes = log->l_logsize - xlog_lsn_sub(log, max_lsn, min_lsn);
445+
if (free_bytes >= log->l_logsize >> 2)
446+
return ailp->ail_target;
447+
448+
target_cycle = CYCLE_LSN(min_lsn);
449+
target_block = BLOCK_LSN(min_lsn) + (log->l_logBBsize >> 2);
450+
if (target_block >= log->l_logBBsize) {
451+
target_block -= log->l_logBBsize;
452+
target_cycle += 1;
453+
}
454+
target_lsn = xlog_assign_lsn(target_cycle, target_block);
455+
456+
/* Cap the target to the highest LSN known to be in the AIL. */
457+
if (XFS_LSN_CMP(target_lsn, max_lsn) > 0)
458+
return max_lsn;
444459

445-
return threshold_lsn;
460+
/* If the existing target is higher than the new target, keep it. */
461+
if (XFS_LSN_CMP(ailp->ail_target, target_lsn) >= 0)
462+
return ailp->ail_target;
463+
return target_lsn;
446464
}
447465

448466
static long
@@ -453,7 +471,6 @@ xfsaild_push(
453471
struct xfs_ail_cursor cur;
454472
struct xfs_log_item *lip;
455473
xfs_lsn_t lsn;
456-
xfs_lsn_t target = NULLCOMMITLSN;
457474
long tout;
458475
int stuck = 0;
459476
int flushing = 0;
@@ -478,25 +495,8 @@ xfsaild_push(
478495
}
479496

480497
spin_lock(&ailp->ail_lock);
481-
482-
/*
483-
* If we have a sync push waiter, we always have to push till the AIL is
484-
* empty. Update the target to point to the end of the AIL so that
485-
* capture updates that occur after the sync push waiter has gone to
486-
* sleep.
487-
*/
488-
if (test_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate) ||
489-
waitqueue_active(&ailp->ail_empty)) {
490-
lip = xfs_ail_max(ailp);
491-
if (lip)
492-
target = lip->li_lsn;
493-
else
494-
clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate);
495-
} else {
496-
target = __xfs_ail_push_target(ailp);
497-
}
498-
499-
if (target == NULLCOMMITLSN)
498+
WRITE_ONCE(ailp->ail_target, xfs_ail_calc_push_target(ailp));
499+
if (ailp->ail_target == NULLCOMMITLSN)
500500
goto out_done;
501501

502502
/* we're done if the AIL is empty or our push has reached the end */
@@ -506,10 +506,10 @@ xfsaild_push(
506506

507507
XFS_STATS_INC(mp, xs_push_ail);
508508

509-
ASSERT(target != NULLCOMMITLSN);
509+
ASSERT(ailp->ail_target != NULLCOMMITLSN);
510510

511511
lsn = lip->li_lsn;
512-
while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
512+
while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
513513
int lock_result;
514514

515515
/*
@@ -595,7 +595,7 @@ xfsaild_push(
595595
if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
596596
ailp->ail_log_flush++;
597597

598-
if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
598+
if (!count || XFS_LSN_CMP(lsn, ailp->ail_target) >= 0) {
599599
/*
600600
* We reached the target or the AIL is empty, so wait a bit
601601
* longer for I/O to complete and remove pushed items from the

fs/xfs/xfs_trans_priv.h

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ struct xfs_ail {
5959
unsigned long ail_opstate;
6060
struct list_head ail_buf_list;
6161
wait_queue_head_t ail_empty;
62+
xfs_lsn_t ail_target;
6263
};
6364

6465
/* Push all items out of the AIL immediately. */
@@ -111,15 +112,9 @@ static inline void xfs_ail_push_all(struct xfs_ail *ailp)
111112
xfs_ail_push(ailp);
112113
}
113114

114-
xfs_lsn_t __xfs_ail_push_target(struct xfs_ail *ailp);
115-
static inline xfs_lsn_t xfs_ail_push_target(struct xfs_ail *ailp)
115+
static inline xfs_lsn_t xfs_ail_get_push_target(struct xfs_ail *ailp)
116116
{
117-
xfs_lsn_t lsn;
118-
119-
spin_lock(&ailp->ail_lock);
120-
lsn = __xfs_ail_push_target(ailp);
121-
spin_unlock(&ailp->ail_lock);
122-
return lsn;
117+
return READ_ONCE(ailp->ail_target);
123118
}
124119

125120
void xfs_ail_push_all_sync(struct xfs_ail *ailp);

0 commit comments

Comments
 (0)