@@ -379,6 +379,19 @@ static void raid0_free(struct mddev *mddev, void *priv)
 	free_conf(mddev, conf);
 }
 
+static int raid0_set_limits(struct mddev *mddev)
+{
+	struct queue_limits lim;
+
+	blk_set_stacking_limits(&lim);
+	lim.max_hw_sectors = mddev->chunk_sectors;
+	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+	lim.io_min = mddev->chunk_sectors << 9;
+	lim.io_opt = lim.io_min * mddev->raid_disks;
+	mddev_stack_rdev_limits(mddev, &lim);
+	return queue_limits_set(mddev->queue, &lim);
+}
+
 static int raid0_run(struct mddev *mddev)
 {
 	struct r0conf *conf;
@@ -400,19 +413,9 @@ static int raid0_run(struct mddev *mddev)
 	}
 	conf = mddev->private;
 	if (!mddev_is_dm(mddev)) {
-		struct md_rdev *rdev;
-
-		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-
-		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-		blk_queue_io_opt(mddev->queue,
-				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
-		rdev_for_each(rdev, mddev) {
-			disk_stack_limits(mddev->gendisk, rdev->bdev,
-					  rdev->data_offset << 9);
-		}
+		ret = raid0_set_limits(mddev);
+		if (ret)
+			goto out_free_conf;
 	}
 
 	/* calculate array device size */
@@ -426,8 +429,10 @@ static int raid0_run(struct mddev *mddev)
 
 	ret = md_integrity_register(mddev);
 	if (ret)
-		free_conf(mddev, conf);
-
+		goto out_free_conf;
+	return 0;
+out_free_conf:
+	free_conf(mddev, conf);
 	return ret;
 }
 
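For reference, a minimal user-space sketch of the io_min/io_opt arithmetic that the new raid0_set_limits() performs. chunk_sectors counts 512-byte sectors, so the << 9 shift converts sectors to bytes; the chunk size and disk count below are hypothetical example values, not taken from this commit.

#include <stdio.h>

int main(void)
{
	/* Hypothetical example values: a 512 KiB chunk across 4 disks. */
	unsigned int chunk_sectors = 1024;		/* 1024 * 512 B = 512 KiB */
	unsigned int raid_disks = 4;

	/* Same arithmetic as raid0_set_limits(): sectors << 9 yields bytes. */
	unsigned int io_min = chunk_sectors << 9;	/* 524288 B, one chunk */
	unsigned int io_opt = io_min * raid_disks;	/* 2097152 B, one full stripe */

	printf("io_min = %u bytes, io_opt = %u bytes\n", io_min, io_opt);
	return 0;
}

io_min thus ends up as one chunk and io_opt as one full stripe, which matches the preferred I/O granularity of a striped array.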