@@ -694,6 +694,136 @@ xlog_cil_insert_items(
694
694
}
695
695
}
696
696
697
+ static inline void
698
+ xlog_cil_ail_insert_batch (
699
+ struct xfs_ail * ailp ,
700
+ struct xfs_ail_cursor * cur ,
701
+ struct xfs_log_item * * log_items ,
702
+ int nr_items ,
703
+ xfs_lsn_t commit_lsn )
704
+ {
705
+ int i ;
706
+
707
+ spin_lock (& ailp -> ail_lock );
708
+ /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
709
+ xfs_trans_ail_update_bulk (ailp , cur , log_items , nr_items , commit_lsn );
710
+
711
+ for (i = 0 ; i < nr_items ; i ++ ) {
712
+ struct xfs_log_item * lip = log_items [i ];
713
+
714
+ if (lip -> li_ops -> iop_unpin )
715
+ lip -> li_ops -> iop_unpin (lip , 0 );
716
+ }
717
+ }
718
+
719
/*
 * Take the checkpoint's log vector chain of items and insert the attached log
 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
 * traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same as an
 * iclog write error even though we haven't started any IO yet. Hence in this
 * case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases that amount of AIL lock traffic to set it up and tear it
 * down.
 */
static void
xlog_cil_ail_insert(
	struct xlog		*log,
	struct list_head	*lv_chain,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_ail		*ailp = log->l_ailp;
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	/* Set up the insert cursor at commit_lsn under the AIL lock. */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	list_for_each_entry(lv, lv_chain, lv_list) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		/*
		 * Items that are released at commit time never enter the AIL;
		 * just drop the commit reference and move on.
		 */
		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(xlog_is_shutdown(ailp->ail_log));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			/*
			 * NOTE: no unlock on the update path above —
			 * xfs_trans_ail_update() drops ailp->ail_lock itself
			 * (mirroring xfs_trans_ail_update_bulk), hence the
			 * asymmetric else-only spin_unlock().
			 */
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xlog_cil_ail_insert_batch(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xlog_cil_ail_insert_batch(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
826
+
697
827
static void
698
828
xlog_cil_free_logvec (
699
829
struct list_head * lv_chain )
@@ -733,7 +863,7 @@ xlog_cil_committed(
733
863
spin_unlock (& ctx -> cil -> xc_push_lock );
734
864
}
735
865
736
- xfs_trans_committed_bulk (ctx -> cil -> xc_log -> l_ailp , & ctx -> lv_chain ,
866
+ xlog_cil_ail_insert (ctx -> cil -> xc_log , & ctx -> lv_chain ,
737
867
ctx -> start_lsn , abort );
738
868
739
869
xfs_extent_busy_sort (& ctx -> busy_extents .extent_list );
0 commit comments