 #include "sysemu/block-backend.h"
 #include "block/block_int.h"
 #include "block/blockjob.h"
+#include "block/coroutines.h"
 #include "block/throttle-groups.h"
 #include "hw/qdev-core.h"
 #include "sysemu/blockdev.h"
@@ -1204,7 +1205,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
 }
 
 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
 blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
                  QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
@@ -1249,7 +1250,7 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
 }
 
 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
 blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset,
                        BdrvRequestFlags flags)
@@ -1306,6 +1307,20 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
     return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
 }
 
+static int coroutine_fn blk_pwritev_part(BlockBackend *blk, int64_t offset,
+                                         int64_t bytes,
+                                         QEMUIOVector *qiov, size_t qiov_offset,
+                                         BdrvRequestFlags flags)
+{
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
+    blk_dec_in_flight(blk);
+
+    return ret;
+}
+
 typedef struct BlkRwCo {
     BlockBackend *blk;
     int64_t offset;
@@ -1314,58 +1329,11 @@ typedef struct BlkRwCo {
     BdrvRequestFlags flags;
 } BlkRwCo;
 
-static void blk_read_entry(void *opaque)
-{
-    BlkRwCo *rwco = opaque;
-    QEMUIOVector *qiov = rwco->iobuf;
-
-    rwco->ret = blk_co_do_preadv(rwco->blk, rwco->offset, qiov->size,
-                                 qiov, rwco->flags);
-    aio_wait_kick();
-}
-
-static void blk_write_entry(void *opaque)
-{
-    BlkRwCo *rwco = opaque;
-    QEMUIOVector *qiov = rwco->iobuf;
-
-    rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
-                                       qiov, 0, rwco->flags);
-    aio_wait_kick();
-}
-
-static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
-                   int64_t bytes, CoroutineEntry co_entry,
-                   BdrvRequestFlags flags)
-{
-    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-    BlkRwCo rwco = {
-        .blk    = blk,
-        .offset = offset,
-        .iobuf  = &qiov,
-        .flags  = flags,
-        .ret    = NOT_DONE,
-    };
-
-    blk_inc_in_flight(blk);
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        co_entry(&rwco);
-    } else {
-        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
-        bdrv_coroutine_enter(blk_bs(blk), co);
-        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
-    }
-    blk_dec_in_flight(blk);
-
-    return rwco.ret;
-}
-
 int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
 {
-    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
-                   flags | BDRV_REQ_ZERO_WRITE);
+    return blk_pwritev_part(blk, offset, bytes, NULL, 0,
+                            flags | BDRV_REQ_ZERO_WRITE);
 }
 
 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
@@ -1510,22 +1478,25 @@ BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
 
 int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
 {
-    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
-    if (ret < 0) {
-        return ret;
-    }
-    return count;
+    int ret;
+    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_preadv(blk, offset, count, &qiov, 0);
+    blk_dec_in_flight(blk);
+
+    return ret < 0 ? ret : count;
 }
 
 int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
                BdrvRequestFlags flags)
 {
-    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
-                      flags);
-    if (ret < 0) {
-        return ret;
-    }
-    return count;
+    int ret;
+    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+
+    ret = blk_pwritev_part(blk, offset, count, &qiov, 0, flags);
+
+    return ret < 0 ? ret : count;
 }
 
 int64_t blk_getlength(BlockBackend *blk)
@@ -1582,7 +1553,7 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
 }
 
 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
 blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     blk_wait_while_drained(blk);
@@ -1594,18 +1565,15 @@ blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
     return bdrv_co_ioctl(blk_bs(blk), req, buf);
 }
 
-static void blk_ioctl_entry(void *opaque)
+int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
-    BlkRwCo *rwco = opaque;
-    QEMUIOVector *qiov = rwco->iobuf;
+    int ret;
 
-    rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
-    aio_wait_kick();
-}
+    blk_inc_in_flight(blk);
+    ret = blk_do_ioctl(blk, req, buf);
+    blk_dec_in_flight(blk);
 
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
-{
-    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+    return ret;
 }
 
 static void blk_aio_ioctl_entry(void *opaque)
@@ -1625,7 +1593,7 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
 }
 
 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
 blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
 {
     int ret;
@@ -1669,22 +1637,19 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
     return ret;
 }
 
-static void blk_pdiscard_entry(void *opaque)
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
 {
-    BlkRwCo *rwco = opaque;
-    QEMUIOVector *qiov = rwco->iobuf;
+    int ret;
 
-    rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
-    aio_wait_kick();
-}
+    blk_inc_in_flight(blk);
+    ret = blk_do_pdiscard(blk, offset, bytes);
+    blk_dec_in_flight(blk);
 
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
-{
-    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
+    return ret;
 }
 
 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
+int coroutine_fn blk_co_do_flush(BlockBackend *blk)
 {
     blk_wait_while_drained(blk);
 
@@ -1721,16 +1686,15 @@ int coroutine_fn blk_co_flush(BlockBackend *blk)
     return ret;
 }
 
-static void blk_flush_entry(void *opaque)
-{
-    BlkRwCo *rwco = opaque;
-    rwco->ret = blk_co_do_flush(rwco->blk);
-    aio_wait_kick();
-}
-
 int blk_flush(BlockBackend *blk)
 {
-    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_flush(blk);
+    blk_dec_in_flight(blk);
+
+    return ret;
 }
 
 void blk_drain(BlockBackend *blk)
@@ -2224,8 +2188,9 @@ int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
 int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                           int count)
 {
-    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
-                   BDRV_REQ_WRITE_COMPRESSED);
+    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+    return blk_pwritev_part(blk, offset, count, &qiov, 0,
+                            BDRV_REQ_WRITE_COMPRESSED);
 }
 
 int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
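
Note on the blk_do_*() helpers called by the new code: they are not defined in this file, so they presumably come from the newly included "block/coroutines.h" (in QEMU of this era such wrappers are produced by the generated_co_wrapper machinery rather than written by hand). The sketch below is illustrative only, not the generated source: it assumes each wrapper follows the same pattern that the removed blk_prw() implemented manually, and the bounce struct and entry-function names are invented for the example; only the calls that already appear in the code removed above are used.

/*
 * Illustrative sketch, assuming blk_do_flush() mirrors the removed
 * blk_prw() pattern: run the coroutine body directly when already in
 * coroutine context, otherwise spawn a coroutine and poll until done.
 * The caller (e.g. blk_flush()) holds the in-flight reference.
 */
typedef struct BlkDoFlushCo {
    BlockBackend *blk;
    int ret;
    bool done;
} BlkDoFlushCo;

static void coroutine_fn blk_do_flush_entry(void *opaque)
{
    BlkDoFlushCo *s = opaque;

    s->ret = blk_co_do_flush(s->blk);
    s->done = true;
    aio_wait_kick();
}

int blk_do_flush(BlockBackend *blk)
{
    if (qemu_in_coroutine()) {
        /* Fast path: already in coroutine context, call the body directly */
        return blk_co_do_flush(blk);
    } else {
        BlkDoFlushCo s = { .blk = blk, .done = false };
        Coroutine *co = qemu_coroutine_create(blk_do_flush_entry, &s);

        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), !s.done);
        return s.ret;
    }
}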