@@ -5751,6 +5751,51 @@ static const struct kobj_type md_ktype = {
5751
5751
5752
5752
int mdp_major = 0 ;
5753
5753
5754
/*
 * mddev_stack_rdev_limits - stack the limits of all member rdevs into @lim
 * @mddev:	array whose member devices are iterated
 * @lim:	queue limits updated in place
 *
 * Folds the block device limits of every rdev in @mddev into @lim, using
 * each rdev's data_offset as the start offset of the data area.  The
 * disk_name is passed through to queue_limits_stack_bdev() — presumably
 * for limit-mismatch warnings; confirm against blk-settings.c.  Requires
 * mddev->gendisk to be set up by the caller.
 */
void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
					mddev->gendisk->disk_name);
	}
}
EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
5765
+
5766
+ /* apply the extra stacking limits from a new rdev into mddev */
5767
+ int mddev_stack_new_rdev (struct mddev * mddev , struct md_rdev * rdev )
5768
+ {
5769
+ struct queue_limits lim ;
5770
+
5771
+ if (mddev_is_dm (mddev ))
5772
+ return 0 ;
5773
+
5774
+ lim = queue_limits_start_update (mddev -> queue );
5775
+ queue_limits_stack_bdev (& lim , rdev -> bdev , rdev -> data_offset ,
5776
+ mddev -> gendisk -> disk_name );
5777
+ return queue_limits_commit_update (mddev -> queue , & lim );
5778
+ }
5779
+ EXPORT_SYMBOL_GPL (mddev_stack_new_rdev );
5780
+
5781
/*
 * mddev_update_io_opt - update the optimal I/O size after a reshape
 * @mddev:	array whose io_opt limit is recalculated
 * @nr_stripes:	stripe count multiplied into io_min to form the new io_opt
 *
 * Best-effort: silently returns without updating when the array is
 * dm-managed (no md-owned queue to touch here) or cannot be suspended.
 */
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
{
	struct queue_limits lim;

	if (mddev_is_dm(mddev))
		return;

	/* don't bother updating io_opt if we can't suspend the array */
	if (mddev_suspend(mddev, false) < 0)
		return;
	lim = queue_limits_start_update(mddev->gendisk->queue);
	lim.io_opt = lim.io_min * nr_stripes;
	/*
	 * Commit result deliberately ignored — consistent with the
	 * "don't bother" policy above; io_opt is advisory only.
	 */
	queue_limits_commit_update(mddev->gendisk->queue, &lim);
	mddev_resume(mddev);
}
EXPORT_SYMBOL_GPL(mddev_update_io_opt);
5798
+
5754
5799
static void mddev_delayed_delete (struct work_struct * ws )
5755
5800
{
5756
5801
struct mddev * mddev = container_of (ws , struct mddev , del_work );
0 commit comments