@@ -1377,7 +1377,7 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	enum blk_zoned_model *zoned_model = data;
 
-	return !q || blk_queue_zoned_model(q) != *zoned_model;
+	return blk_queue_zoned_model(q) != *zoned_model;
 }
 
 static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1407,7 +1407,7 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;
 
-	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
+	return blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
 static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1556,7 +1556,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 	unsigned long flush = (unsigned long) data;
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && (q->queue_flags & flush);
+	return (q->queue_flags & flush);
 }
 
 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
@@ -1606,23 +1606,23 @@ static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !blk_queue_nonrot(q);
+	return !blk_queue_nonrot(q);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !blk_queue_add_random(q);
+	return !blk_queue_add_random(q);
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
 					 sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !q->limits.max_write_same_sectors;
+	return !q->limits.max_write_same_sectors;
 }
 
 static bool dm_table_supports_write_same(struct dm_table *t)
@@ -1649,7 +1649,7 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !q->limits.max_write_zeroes_sectors;
+	return !q->limits.max_write_zeroes_sectors;
 }
 
 static bool dm_table_supports_write_zeroes(struct dm_table *t)
@@ -1676,7 +1676,7 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !blk_queue_nowait(q);
+	return !blk_queue_nowait(q);
 }
 
 static bool dm_table_supports_nowait(struct dm_table *t)
@@ -1703,7 +1703,7 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !blk_queue_discard(q);
+	return !blk_queue_discard(q);
 }
 
 static bool dm_table_supports_discards(struct dm_table *t)
@@ -1737,7 +1737,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !blk_queue_secure_erase(q);
+	return !blk_queue_secure_erase(q);
 }
 
 static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1765,7 +1765,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_stable_writes(q);
+	return blk_queue_stable_writes(q);
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
0 commit comments