@@ -657,7 +657,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
 	dmabuf->file = file;
 
-	mutex_init(&dmabuf->lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
 	mutex_lock(&db_list.lock);
@@ -795,6 +794,70 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 	return sg_table;
 }
 
+/**
+ * DOC: locking convention
+ *
+ * In order to avoid deadlock situations between dma-buf exports and importers,
+ * all dma-buf API users must follow the common dma-buf locking convention.
+ *
+ * Convention for importers
+ *
+ * 1. Importers must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ * - dma_buf_pin()
+ * - dma_buf_unpin()
+ * - dma_buf_map_attachment()
+ * - dma_buf_unmap_attachment()
+ * - dma_buf_vmap()
+ * - dma_buf_vunmap()
+ *
+ * 2. Importers must not hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ * - dma_buf_attach()
+ * - dma_buf_dynamic_attach()
+ * - dma_buf_detach()
+ * - dma_buf_export()
+ * - dma_buf_fd()
+ * - dma_buf_get()
+ * - dma_buf_put()
+ * - dma_buf_mmap()
+ * - dma_buf_begin_cpu_access()
+ * - dma_buf_end_cpu_access()
+ * - dma_buf_map_attachment_unlocked()
+ * - dma_buf_unmap_attachment_unlocked()
+ * - dma_buf_vmap_unlocked()
+ * - dma_buf_vunmap_unlocked()
+ *
+ * Convention for exporters
+ *
+ * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
+ *    reservation and exporter can take the lock:
+ *
+ * - &dma_buf_ops.attach()
+ * - &dma_buf_ops.detach()
+ * - &dma_buf_ops.release()
+ * - &dma_buf_ops.begin_cpu_access()
+ * - &dma_buf_ops.end_cpu_access()
+ *
+ * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
+ *    reservation and exporter can't take the lock:
+ *
+ * - &dma_buf_ops.pin()
+ * - &dma_buf_ops.unpin()
+ * - &dma_buf_ops.map_dma_buf()
+ * - &dma_buf_ops.unmap_dma_buf()
+ * - &dma_buf_ops.mmap()
+ * - &dma_buf_ops.vmap()
+ * - &dma_buf_ops.vunmap()
+ *
+ * 3. Exporters must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ * - dma_buf_move_notify()
+ */
+
 /**
  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:	[in]	buffer to attach device to.
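The importer side of this convention can be sketched as follows (illustrative only, not part of this patch; import_example() and its error handling are hypothetical, and a real importer would keep the mapping around for I/O):

    static int import_example(struct dma_buf *dmabuf, struct device *dev)
    {
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;

            /* attach/detach are called without the reservation lock held */
            attach = dma_buf_attach(dmabuf, dev);
            if (IS_ERR(attach))
                    return PTR_ERR(attach);

            /* map/unmap require the reservation lock to be held ... */
            dma_resv_lock(dmabuf->resv, NULL);
            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            dma_resv_unlock(dmabuf->resv);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(dmabuf, attach);
                    return PTR_ERR(sgt);
            }

            /* ... or use the _unlocked wrappers, which take it internally */
            dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
            dma_buf_detach(dmabuf, attach);
            return 0;
    }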
@@ -859,8 +922,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	    dma_buf_is_dynamic(dmabuf)) {
 		struct sg_table *sgt;
 
+		dma_resv_lock(attach->dmabuf->resv, NULL);
 		if (dma_buf_is_dynamic(attach->dmabuf)) {
-			dma_resv_lock(attach->dmabuf->resv, NULL);
 			ret = dmabuf->ops->pin(attach);
 			if (ret)
 				goto err_unlock;
@@ -873,8 +936,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 			ret = PTR_ERR(sgt);
 			goto err_unpin;
 		}
-		if (dma_buf_is_dynamic(attach->dmabuf))
-			dma_resv_unlock(attach->dmabuf->resv);
+		dma_resv_unlock(attach->dmabuf->resv);
 		attach->sgt = sgt;
 		attach->dir = DMA_BIDIRECTIONAL;
 	}
@@ -890,8 +952,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 		dmabuf->ops->unpin(attach);
 
 err_unlock:
-	if (dma_buf_is_dynamic(attach->dmabuf))
-		dma_resv_unlock(attach->dmabuf->resv);
+	dma_resv_unlock(attach->dmabuf->resv);
 
 	dma_buf_detach(dmabuf, attach);
 	return ERR_PTR(ret);
@@ -937,21 +998,19 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 	if (WARN_ON(!dmabuf || !attach))
 		return;
 
+	dma_resv_lock(attach->dmabuf->resv, NULL);
+
 	if (attach->sgt) {
-		if (dma_buf_is_dynamic(attach->dmabuf))
-			dma_resv_lock(attach->dmabuf->resv, NULL);
 
 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
 
-		if (dma_buf_is_dynamic(attach->dmabuf)) {
+		if (dma_buf_is_dynamic(attach->dmabuf))
 			dmabuf->ops->unpin(attach);
-			dma_resv_unlock(attach->dmabuf->resv);
-		}
 	}
-
-	dma_resv_lock(dmabuf->resv, NULL);
 	list_del(&attach->node);
+
 	dma_resv_unlock(dmabuf->resv);
+
 	if (dmabuf->ops->detach)
 		dmabuf->ops->detach(dmabuf, attach);
@@ -1042,8 +1101,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
-	if (dma_buf_attachment_is_dynamic(attach))
-		dma_resv_assert_held(attach->dmabuf->resv);
+	dma_resv_assert_held(attach->dmabuf->resv);
 
 	if (attach->sgt) {
 		/*
@@ -1058,7 +1116,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	}
 
 	if (dma_buf_is_dynamic(attach->dmabuf)) {
-		dma_resv_assert_held(attach->dmabuf->resv);
 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
 			r = attach->dmabuf->ops->pin(attach);
 			if (r)
@@ -1100,6 +1157,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
 
+/**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach:	[in]	attachment whose scatterlist is to be returned
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+				enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf))
+		return ERR_PTR(-EINVAL);
+
+	dma_resv_lock(attach->dmabuf->resv, NULL);
+	sg_table = dma_buf_map_attachment(attach, direction);
+	dma_resv_unlock(attach->dmabuf->resv);
+
+	return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+
 /**
  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
@@ -1119,15 +1204,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	if (dma_buf_attachment_is_dynamic(attach))
-		dma_resv_assert_held(attach->dmabuf->resv);
+	dma_resv_assert_held(attach->dmabuf->resv);
 
 	if (attach->sgt == sg_table)
 		return;
 
-	if (dma_buf_is_dynamic(attach->dmabuf))
-		dma_resv_assert_held(attach->dmabuf->resv);
-
 	__unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_is_dynamic(attach->dmabuf) &&
@@ -1136,6 +1217,31 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
 
+/**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
+ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach:	[in]	attachment to unmap buffer from
+ * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+				       struct sg_table *sg_table,
+				       enum dma_data_direction direction)
+{
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+		return;
+
+	dma_resv_lock(attach->dmabuf->resv, NULL);
+	dma_buf_unmap_attachment(attach, sg_table, direction);
+	dma_resv_unlock(attach->dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+
 /**
  * dma_buf_move_notify - notify attachments that DMA-buf is moving
  *
@@ -1347,6 +1453,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
+	int ret;
+
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
 
@@ -1367,7 +1475,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 	vma_set_file(vma, dmabuf->file);
 	vma->vm_pgoff = pgoff;
 
-	return dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_lock(dmabuf->resv, NULL);
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_unlock(dmabuf->resv);
+
+	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 
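Because dma_buf_mmap() now wraps the callback in the reservation lock, an exporter's &dma_buf_ops.mmap implementation must not take that lock itself. A hedged sketch of what a compliant callback might look like (my_exporter_mmap, struct my_buffer and its pfn field are hypothetical):

    static int my_exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
    {
            struct my_buffer *buf = dmabuf->priv; /* hypothetical private data */

            /* already locked by dma_buf_mmap(); locking here would deadlock */
            dma_resv_assert_held(dmabuf->resv);

            return remap_pfn_range(vma, vma->vm_start, buf->pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }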
@@ -1390,41 +1502,67 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 {
 	struct iosys_map ptr;
-	int ret = 0;
+	int ret;
 
 	iosys_map_clear(map);
 
 	if (WARN_ON(!dmabuf))
 		return -EINVAL;
 
+	dma_resv_assert_held(dmabuf->resv);
+
 	if (!dmabuf->ops->vmap)
 		return -EINVAL;
 
-	mutex_lock(&dmabuf->lock);
 	if (dmabuf->vmapping_counter) {
 		dmabuf->vmapping_counter++;
 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
 		*map = dmabuf->vmap_ptr;
-		goto out_unlock;
+		return 0;
 	}
 
 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
 
 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
 	if (WARN_ON_ONCE(ret))
-		goto out_unlock;
+		return ret;
 
 	dmabuf->vmap_ptr = ptr;
 	dmabuf->vmapping_counter = 1;
 
 	*map = dmabuf->vmap_ptr;
 
-out_unlock:
-	mutex_unlock(&dmabuf->lock);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
 
+/**
+ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:	[in]	buffer to vmap
+ * @map:	[out]	returns the vmap pointer
+ *
+ * Unlocked version of dma_buf_vmap()
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	int ret;
+
+	iosys_map_clear(map);
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	dma_resv_lock(dmabuf->resv, NULL);
+	ret = dma_buf_vmap(dmabuf, map);
+	dma_resv_unlock(dmabuf->resv);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+
 /**
  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
  * @dmabuf:	[in]	buffer to vunmap
@@ -1435,20 +1573,36 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 	if (WARN_ON(!dmabuf))
 		return;
 
+	dma_resv_assert_held(dmabuf->resv);
+
 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
 	BUG_ON(dmabuf->vmapping_counter == 0);
 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
 
-	mutex_lock(&dmabuf->lock);
 	if (--dmabuf->vmapping_counter == 0) {
 		if (dmabuf->ops->vunmap)
 			dmabuf->ops->vunmap(dmabuf, map);
 		iosys_map_clear(&dmabuf->vmap_ptr);
 	}
-	mutex_unlock(&dmabuf->lock);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
+/**
+ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:	[in]	buffer to vunmap
+ * @map:	[in]	vmap pointer to vunmap
+ */
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	if (WARN_ON(!dmabuf))
+		return;
+
+	dma_resv_lock(dmabuf->resv, NULL);
+	dma_buf_vunmap(dmabuf, map);
+	dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
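For CPU access, the new vmap wrappers pair up as in this minimal sketch (illustrative only; cpu_access_example() is hypothetical and assumes the buffer lives in system memory, so map.vaddr is valid):

    static int cpu_access_example(struct dma_buf *dmabuf)
    {
            struct iosys_map map;
            int ret;

            /* takes and drops dmabuf->resv internally */
            ret = dma_buf_vmap_unlocked(dmabuf, &map);
            if (ret)
                    return ret;

            /* plain CPU write through the kernel mapping */
            memset(map.vaddr, 0, dmabuf->size);

            dma_buf_vunmap_unlocked(dmabuf, &map);
            return 0;
    }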