@@ -478,8 +478,6 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 {
 	tg->bytes_disp[rw] = 0;
 	tg->io_disp[rw] = 0;
-	tg->carryover_bytes[rw] = 0;
-	tg->carryover_ios[rw] = 0;
 
 	/*
 	 * Previous slice has expired. We must have trimmed it after last
@@ -498,16 +496,14 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 }
 
 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
-					  bool clear_carryover)
+					  bool clear)
 {
-	tg->bytes_disp[rw] = 0;
-	tg->io_disp[rw] = 0;
+	if (clear) {
+		tg->bytes_disp[rw] = 0;
+		tg->io_disp[rw] = 0;
+	}
 	tg->slice_start[rw] = jiffies;
 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
-	if (clear_carryover) {
-		tg->carryover_bytes[rw] = 0;
-		tg->carryover_ios[rw] = 0;
-	}
 
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
@@ -617,20 +613,16 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 */
 	time_elapsed -= tg->td->throtl_slice;
 	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
-					     time_elapsed) +
-		     tg->carryover_bytes[rw];
-	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
-		  tg->carryover_ios[rw];
+					     time_elapsed);
+	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed);
 	if (bytes_trim <= 0 && io_trim <= 0)
 		return;
 
-	tg->carryover_bytes[rw] = 0;
 	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
 		tg->bytes_disp[rw] -= bytes_trim;
 	else
 		tg->bytes_disp[rw] = 0;
 
-	tg->carryover_ios[rw] = 0;
 	if ((int)tg->io_disp[rw] >= io_trim)
 		tg->io_disp[rw] -= io_trim;
 	else
@@ -645,7 +637,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 		   jiffies);
 }
 
-static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
+static void __tg_update_carryover(struct throtl_grp *tg, bool rw,
+				  long long *bytes, int *ios)
 {
 	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
 	u64 bps_limit = tg_bps_limit(tg, rw);
@@ -658,26 +651,28 @@ static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
 	 * configuration.
 	 */
 	if (bps_limit != U64_MAX)
-		tg->carryover_bytes[rw] +=
-			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
+		*bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
 			tg->bytes_disp[rw];
 	if (iops_limit != UINT_MAX)
-		tg->carryover_ios[rw] +=
-			calculate_io_allowed(iops_limit, jiffy_elapsed) -
+		*ios = calculate_io_allowed(iops_limit, jiffy_elapsed) -
 			tg->io_disp[rw];
+	tg->bytes_disp[rw] -= *bytes;
+	tg->io_disp[rw] -= *ios;
 }
 
 static void tg_update_carryover(struct throtl_grp *tg)
 {
+	long long bytes[2] = {0};
+	int ios[2] = {0};
+
 	if (tg->service_queue.nr_queued[READ])
-		__tg_update_carryover(tg, READ);
+		__tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
 	if (tg->service_queue.nr_queued[WRITE])
-		__tg_update_carryover(tg, WRITE);
+		__tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
 
 	/* see comments in struct throtl_grp for meaning of these fields. */
 	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
-		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
-		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
+		   bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);
 }
 
 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
@@ -695,8 +690,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
 
 	/* Round up to the next throttle slice, wait time must be nonzero */
 	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
-	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
-		     tg->carryover_ios[rw];
+	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
 	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
 		return 0;
 
@@ -729,8 +723,7 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 		jiffy_elapsed_rnd = tg->td->throtl_slice;
 
 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
-	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
-			tg->carryover_bytes[rw];
+	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
 	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
 		return 0;
 