Skip to content

Commit 332bd07

Browse files
Heinz Mauelshagen (mauelsha) authored and Mike Snitzer committed
dm raid: fix accesses beyond end of raid member array
On dm-raid table load (using raid_ctr), dm-raid allocates an array rs->devs[rs->raid_disks] for the raid device members. rs->raid_disks is defined by the number of raid metadata and image tupples passed into the target's constructor. In the case of RAID layout changes being requested, that number can be different from the current number of members for existing raid sets as defined in their superblocks. Example RAID layout changes include: - raid1 legs being added/removed - raid4/5/6/10 number of stripes changed (stripe reshaping) - takeover to higher raid level (e.g. raid5 -> raid6) When accessing array members, rs->raid_disks must be used in control loops instead of the potentially larger value in rs->md.raid_disks. Otherwise it will cause memory access beyond the end of the rs->devs array. Fix this by changing code that is prone to out-of-bounds access. Also fix validate_raid_redundancy() to validate all devices that are added. Also, use braces to help clean up raid_iterate_devices(). The out-of-bounds memory accesses was discovered using KASAN. This commit was verified to pass all LVM2 RAID tests (with KASAN enabled). Cc: [email protected] Signed-off-by: Heinz Mauelshagen <[email protected]> Signed-off-by: Mike Snitzer <[email protected]>
1 parent 90736eb commit 332bd07

File tree

1 file changed

+18
-16
lines changed

1 file changed

+18
-16
lines changed

drivers/md/dm-raid.c

Lines changed: 18 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
10011001
static int validate_raid_redundancy(struct raid_set *rs)
10021002
{
10031003
unsigned int i, rebuild_cnt = 0;
1004-
unsigned int rebuilds_per_group = 0, copies;
1004+
unsigned int rebuilds_per_group = 0, copies, raid_disks;
10051005
unsigned int group_size, last_group_start;
10061006

1007-
for (i = 0; i < rs->md.raid_disks; i++)
1008-
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
1009-
!rs->dev[i].rdev.sb_page)
1007+
for (i = 0; i < rs->raid_disks; i++)
1008+
if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
1009+
((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
1010+
!rs->dev[i].rdev.sb_page)))
10101011
rebuild_cnt++;
10111012

10121013
switch (rs->md.level) {
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
10461047
* A A B B C
10471048
* C D D E E
10481049
*/
1050+
raid_disks = min(rs->raid_disks, rs->md.raid_disks);
10491051
if (__is_raid10_near(rs->md.new_layout)) {
1050-
for (i = 0; i < rs->md.raid_disks; i++) {
1052+
for (i = 0; i < raid_disks; i++) {
10511053
if (!(i % copies))
10521054
rebuilds_per_group = 0;
10531055
if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
10701072
* results in the need to treat the last (potentially larger)
10711073
* set differently.
10721074
*/
1073-
group_size = (rs->md.raid_disks / copies);
1074-
last_group_start = (rs->md.raid_disks / group_size) - 1;
1075+
group_size = (raid_disks / copies);
1076+
last_group_start = (raid_disks / group_size) - 1;
10751077
last_group_start *= group_size;
1076-
for (i = 0; i < rs->md.raid_disks; i++) {
1078+
for (i = 0; i < raid_disks; i++) {
10771079
if (!(i % copies) && !(i > last_group_start))
10781080
rebuilds_per_group = 0;
10791081
if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
15881590
{
15891591
int i;
15901592

1591-
for (i = 0; i < rs->md.raid_disks; i++) {
1593+
for (i = 0; i < rs->raid_disks; i++) {
15921594
struct md_rdev *rdev = &rs->dev[i].rdev;
15931595

15941596
if (!test_bit(Journal, &rdev->flags) &&
@@ -3766,13 +3768,13 @@ static int raid_iterate_devices(struct dm_target *ti,
37663768
unsigned int i;
37673769
int r = 0;
37683770

3769-
for (i = 0; !r && i < rs->md.raid_disks; i++)
3770-
if (rs->dev[i].data_dev)
3771-
r = fn(ti,
3772-
rs->dev[i].data_dev,
3773-
0, /* No offset on data devs */
3774-
rs->md.dev_sectors,
3775-
data);
3771+
for (i = 0; !r && i < rs->raid_disks; i++) {
3772+
if (rs->dev[i].data_dev) {
3773+
r = fn(ti, rs->dev[i].data_dev,
3774+
0, /* No offset on data devs */
3775+
rs->md.dev_sectors, data);
3776+
}
3777+
}
37763778

37773779
return r;
37783780
}

0 commit comments

Comments
 (0)