@@ -38,8 +38,6 @@ struct throtl_data
 	/* Total Number of queued bios on READ and WRITE lists */
 	unsigned int nr_queued[2];
 
-	unsigned int throtl_slice;
-
 	/* Work for dispatching throttled bios */
 	struct work_struct dispatch_work;
 };
@@ -446,7 +444,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					   unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * DFL_THROTL_SLICE;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -514,7 +512,7 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 	if (time_after(start, tg->slice_start[rw]))
 		tg->slice_start[rw] = start;
 
-	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+	tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE;
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -529,7 +527,7 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 		tg->io_disp[rw] = 0;
 	}
 	tg->slice_start[rw] = jiffies;
-	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+	tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE;
 
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
@@ -540,7 +538,7 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					 unsigned long jiffy_end)
 {
-	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
+	tg->slice_end[rw] = roundup(jiffy_end, DFL_THROTL_SLICE);
 }
 
 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
@@ -671,12 +669,12 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 * sooner, then we need to reduce slice_end. A high bogus slice_end
 	 * is bad because it does not allow new slice to start.
 	 */
-	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
+	throtl_set_slice_end(tg, rw, jiffies + DFL_THROTL_SLICE);
 
 	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
-				 tg->td->throtl_slice);
+				 DFL_THROTL_SLICE);
 	/* Don't trim slice until at least 2 slices are used */
-	if (time_elapsed < tg->td->throtl_slice * 2)
+	if (time_elapsed < DFL_THROTL_SLICE * 2)
 		return;
 
 	/*
@@ -687,7 +685,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 * lower rate than expected. Therefore, other than the above rounddown,
 	 * one extra slice is preserved for deviation.
	 */
-	time_elapsed -= tg->td->throtl_slice;
+	time_elapsed -= DFL_THROTL_SLICE;
 	bytes_trim = throtl_trim_bps(tg, rw, time_elapsed);
 	io_trim = throtl_trim_iops(tg, rw, time_elapsed);
 	if (!bytes_trim && !io_trim)
@@ -697,7 +695,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 
 	throtl_log(&tg->service_queue,
 		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
-		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
+		   rw == READ ? 'R' : 'W', time_elapsed / DFL_THROTL_SLICE,
 		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
 		   jiffies);
 }
@@ -768,7 +766,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
 	jiffy_elapsed = jiffies - tg->slice_start[rw];
 
 	/* Round up to the next throttle slice, wait time must be nonzero */
-	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
+	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, DFL_THROTL_SLICE);
 	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
 	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
		return 0;
@@ -794,9 +792,9 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 
 	/* Slice has just started. Consider one slice interval */
 	if (!jiffy_elapsed)
-		jiffy_elapsed_rnd = tg->td->throtl_slice;
+		jiffy_elapsed_rnd = DFL_THROTL_SLICE;
 
-	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
+	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, DFL_THROTL_SLICE);
 	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
 	/* Need to consider the case of bytes_allowed overflow. */
 	if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
@@ -848,7 +846,7 @@ static void tg_update_slice(struct throtl_grp *tg, bool rw)
	    sq_queued(&tg->service_queue, rw) == 0)
		throtl_start_new_slice(tg, rw, true);
 	else
-		throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);
+		throtl_extend_slice(tg, rw, jiffies + DFL_THROTL_SLICE);
 }
 
 static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio)
@@ -1337,12 +1335,8 @@ static int blk_throtl_init(struct gendisk *disk)
 	if (ret) {
 		q->td = NULL;
 		kfree(td);
-		goto out;
 	}
 
-	td->throtl_slice = DFL_THROTL_SLICE;
-
-out:
 	blk_mq_unquiesce_queue(disk->queue);
 	blk_mq_unfreeze_queue(disk->queue, memflags);
 
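
The net effect of the patch is that every slice computation now uses the compile-time default DFL_THROTL_SLICE instead of the per-device td->throtl_slice field. Below is a minimal standalone C sketch of the pending-timer clamp from throtl_schedule_pending_timer under that assumption; HZ, the DFL_THROTL_SLICE value (taken here as HZ / 10), and the helper name clamp_pending_expiry are stand-ins for illustration, not the kernel's actual definitions.

/*
 * Standalone model: with a fixed slice length, the maximum sleep is
 * always "now + 8 slices", independent of any per-device tuning.
 */
#include <stdio.h>

#define HZ               1000            /* assumed tick rate for this model */
#define DFL_THROTL_SLICE (HZ / 10)       /* assumed default slice length */

/* Clamp a requested expiry so a cgroup never sleeps longer than 8 slices. */
static unsigned long clamp_pending_expiry(unsigned long now, unsigned long expires)
{
	unsigned long max_expire = now + 8 * DFL_THROTL_SLICE;

	return (expires > max_expire) ? max_expire : expires;
}

int main(void)
{
	unsigned long now = 10000;

	/* A short sleep is left untouched... */
	printf("short: %lu\n", clamp_pending_expiry(now, now + 50));
	/* ...while an overly long one is cut back to now + 8 slices. */
	printf("long:  %lu\n", clamp_pending_expiry(now, now + 100000));
	return 0;
}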