@@ -1292,7 +1292,8 @@ static void dmio_complete(unsigned long error, void *context)
1292
1292
}
1293
1293
1294
1294
static void use_dmio (struct dm_buffer * b , enum req_op op , sector_t sector ,
1295
- unsigned int n_sectors , unsigned int offset )
1295
+ unsigned int n_sectors , unsigned int offset ,
1296
+ unsigned short ioprio )
1296
1297
{
1297
1298
int r ;
1298
1299
struct dm_io_request io_req = {
@@ -1315,7 +1316,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1315
1316
io_req .mem .ptr .vma = (char * )b -> data + offset ;
1316
1317
}
1317
1318
1318
- r = dm_io (& io_req , 1 , & region , NULL );
1319
+ r = dm_io (& io_req , 1 , & region , NULL , ioprio );
1319
1320
if (unlikely (r ))
1320
1321
b -> end_io (b , errno_to_blk_status (r ));
1321
1322
}
@@ -1331,21 +1332,23 @@ static void bio_complete(struct bio *bio)
1331
1332
}
1332
1333
1333
1334
static void use_bio (struct dm_buffer * b , enum req_op op , sector_t sector ,
1334
- unsigned int n_sectors , unsigned int offset )
1335
+ unsigned int n_sectors , unsigned int offset ,
1336
+ unsigned short ioprio )
1335
1337
{
1336
1338
struct bio * bio ;
1337
1339
char * ptr ;
1338
1340
unsigned int len ;
1339
1341
1340
1342
bio = bio_kmalloc (1 , GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN );
1341
1343
if (!bio ) {
1342
- use_dmio (b , op , sector , n_sectors , offset );
1344
+ use_dmio (b , op , sector , n_sectors , offset , ioprio );
1343
1345
return ;
1344
1346
}
1345
1347
bio_init (bio , b -> c -> bdev , bio -> bi_inline_vecs , 1 , op );
1346
1348
bio -> bi_iter .bi_sector = sector ;
1347
1349
bio -> bi_end_io = bio_complete ;
1348
1350
bio -> bi_private = b ;
1351
+ bio -> bi_ioprio = ioprio ;
1349
1352
1350
1353
ptr = (char * )b -> data + offset ;
1351
1354
len = n_sectors << SECTOR_SHIFT ;
@@ -1368,7 +1371,7 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
1368
1371
return sector ;
1369
1372
}
1370
1373
1371
- static void submit_io (struct dm_buffer * b , enum req_op op ,
1374
+ static void submit_io (struct dm_buffer * b , enum req_op op , unsigned short ioprio ,
1372
1375
void (* end_io )(struct dm_buffer * , blk_status_t ))
1373
1376
{
1374
1377
unsigned int n_sectors ;
@@ -1398,9 +1401,9 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
1398
1401
}
1399
1402
1400
1403
if (b -> data_mode != DATA_MODE_VMALLOC )
1401
- use_bio (b , op , sector , n_sectors , offset );
1404
+ use_bio (b , op , sector , n_sectors , offset , ioprio );
1402
1405
else
1403
- use_dmio (b , op , sector , n_sectors , offset );
1406
+ use_dmio (b , op , sector , n_sectors , offset , ioprio );
1404
1407
}
1405
1408
1406
1409
/*
@@ -1456,7 +1459,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
1456
1459
b -> write_end = b -> dirty_end ;
1457
1460
1458
1461
if (!write_list )
1459
- submit_io (b , REQ_OP_WRITE , write_endio );
1462
+ submit_io (b , REQ_OP_WRITE , IOPRIO_DEFAULT , write_endio );
1460
1463
else
1461
1464
list_add_tail (& b -> write_list , write_list );
1462
1465
}
@@ -1470,7 +1473,7 @@ static void __flush_write_list(struct list_head *write_list)
1470
1473
struct dm_buffer * b =
1471
1474
list_entry (write_list -> next , struct dm_buffer , write_list );
1472
1475
list_del (& b -> write_list );
1473
- submit_io (b , REQ_OP_WRITE , write_endio );
1476
+ submit_io (b , REQ_OP_WRITE , IOPRIO_DEFAULT , write_endio );
1474
1477
cond_resched ();
1475
1478
}
1476
1479
blk_finish_plug (& plug );
@@ -1852,7 +1855,8 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
1852
1855
* and uses dm_bufio_mark_buffer_dirty to write new data back).
1853
1856
*/
1854
1857
static void * new_read (struct dm_bufio_client * c , sector_t block ,
1855
- enum new_flag nf , struct dm_buffer * * bp )
1858
+ enum new_flag nf , struct dm_buffer * * bp ,
1859
+ unsigned short ioprio )
1856
1860
{
1857
1861
int need_submit = 0 ;
1858
1862
struct dm_buffer * b ;
@@ -1905,7 +1909,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
1905
1909
return NULL ;
1906
1910
1907
1911
if (need_submit )
1908
- submit_io (b , REQ_OP_READ , read_endio );
1912
+ submit_io (b , REQ_OP_READ , ioprio , read_endio );
1909
1913
1910
1914
if (nf != NF_GET ) /* we already tested this condition above */
1911
1915
wait_on_bit_io (& b -> state , B_READING , TASK_UNINTERRUPTIBLE );
@@ -1926,32 +1930,46 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
1926
1930
/*
 * Look up @block in the client's cache via new_read() with NF_GET,
 * storing the buffer in *@bp.  Always runs at the default I/O priority.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
1932
1936
1933
- void * dm_bufio_read (struct dm_bufio_client * c , sector_t block ,
1934
- struct dm_buffer * * bp )
1937
+ static void * __dm_bufio_read (struct dm_bufio_client * c , sector_t block ,
1938
+ struct dm_buffer * * bp , unsigned short ioprio )
1935
1939
{
1936
1940
if (WARN_ON_ONCE (dm_bufio_in_request ()))
1937
1941
return ERR_PTR (- EINVAL );
1938
1942
1939
- return new_read (c , block , NF_READ , bp );
1943
+ return new_read (c , block , NF_READ , bp , ioprio );
1944
+ }
1945
+
1946
+ void * dm_bufio_read (struct dm_bufio_client * c , sector_t block ,
1947
+ struct dm_buffer * * bp )
1948
+ {
1949
+ return __dm_bufio_read (c , block , bp , IOPRIO_DEFAULT );
1940
1950
}
1941
1951
EXPORT_SYMBOL_GPL (dm_bufio_read );
1942
1952
1953
+ void * dm_bufio_read_with_ioprio (struct dm_bufio_client * c , sector_t block ,
1954
+ struct dm_buffer * * bp , unsigned short ioprio )
1955
+ {
1956
+ return __dm_bufio_read (c , block , bp , ioprio );
1957
+ }
1958
+ EXPORT_SYMBOL_GPL (dm_bufio_read_with_ioprio );
1959
+
1943
1960
/*
 * Obtain a buffer for @block via new_read() with NF_FRESH, at the
 * default I/O priority.  Not allowed from an I/O request context
 * (returns ERR_PTR(-EINVAL) there, mirroring __dm_bufio_read()).
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return ERR_PTR(-EINVAL);

	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
1952
1969
1953
- void dm_bufio_prefetch (struct dm_bufio_client * c ,
1954
- sector_t block , unsigned int n_blocks )
1970
+ static void __dm_bufio_prefetch (struct dm_bufio_client * c ,
1971
+ sector_t block , unsigned int n_blocks ,
1972
+ unsigned short ioprio )
1955
1973
{
1956
1974
struct blk_plug plug ;
1957
1975
@@ -1987,7 +2005,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
1987
2005
dm_bufio_unlock (c );
1988
2006
1989
2007
if (need_submit )
1990
- submit_io (b , REQ_OP_READ , read_endio );
2008
+ submit_io (b , REQ_OP_READ , ioprio , read_endio );
1991
2009
dm_bufio_release (b );
1992
2010
1993
2011
cond_resched ();
@@ -2002,8 +2020,20 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
2002
2020
flush_plug :
2003
2021
blk_finish_plug (& plug );
2004
2022
}
2023
+
2024
+ void dm_bufio_prefetch (struct dm_bufio_client * c , sector_t block , unsigned int n_blocks )
2025
+ {
2026
+ return __dm_bufio_prefetch (c , block , n_blocks , IOPRIO_DEFAULT );
2027
+ }
2005
2028
EXPORT_SYMBOL_GPL (dm_bufio_prefetch );
2006
2029
2030
+ void dm_bufio_prefetch_with_ioprio (struct dm_bufio_client * c , sector_t block ,
2031
+ unsigned int n_blocks , unsigned short ioprio )
2032
+ {
2033
+ return __dm_bufio_prefetch (c , block , n_blocks , ioprio );
2034
+ }
2035
+ EXPORT_SYMBOL_GPL (dm_bufio_prefetch_with_ioprio );
2036
+
2007
2037
void dm_bufio_release (struct dm_buffer * b )
2008
2038
{
2009
2039
struct dm_bufio_client * c = b -> c ;
@@ -2167,7 +2197,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
2167
2197
if (WARN_ON_ONCE (dm_bufio_in_request ()))
2168
2198
return - EINVAL ;
2169
2199
2170
- return dm_io (& io_req , 1 , & io_reg , NULL );
2200
+ return dm_io (& io_req , 1 , & io_reg , NULL , IOPRIO_DEFAULT );
2171
2201
}
2172
2202
EXPORT_SYMBOL_GPL (dm_bufio_issue_flush );
2173
2203
@@ -2191,7 +2221,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
2191
2221
if (WARN_ON_ONCE (dm_bufio_in_request ()))
2192
2222
return - EINVAL ; /* discards are optional */
2193
2223
2194
- return dm_io (& io_req , 1 , & io_reg , NULL );
2224
+ return dm_io (& io_req , 1 , & io_reg , NULL , IOPRIO_DEFAULT );
2195
2225
}
2196
2226
EXPORT_SYMBOL_GPL (dm_bufio_issue_discard );
2197
2227
@@ -2968,6 +2998,6 @@ MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2968
2998
module_param_named (current_allocated_bytes , dm_bufio_current_allocated , ulong , 0444 );
2969
2999
MODULE_PARM_DESC (current_allocated_bytes , "Memory currently used by the cache" );
2970
3000
2971
- MODULE_AUTHOR ("Mikulas Patocka <dm-devel@redhat.com >" );
3001
+ MODULE_AUTHOR ("Mikulas Patocka <dm-devel@lists.linux.dev >" );
2972
3002
MODULE_DESCRIPTION (DM_NAME " buffered I/O library" );
2973
3003
MODULE_LICENSE ("GPL" );
0 commit comments