@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
+static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
+{
+        int prot = 0;
+
+        if (attrs & DMA_ATTR_PRIVILEGED)
+                prot |= IOMMU_PRIV;
+
+        switch (dir) {
+        case DMA_BIDIRECTIONAL:
+                return prot | IOMMU_READ | IOMMU_WRITE;
+        case DMA_TO_DEVICE:
+                return prot | IOMMU_READ;
+        case DMA_FROM_DEVICE:
+                return prot | IOMMU_WRITE;
+        default:
+                return prot;
+        }
+}
+
 /* IOMMU */
 
 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
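(Reviewer note, not part of the patch: the new __dma_info_to_prot() helper folds both the DMA direction and the DMA attributes into a single IOMMU protection mask, so passing DMA_ATTR_PRIVILEGED simply ORs IOMMU_PRIV on top of the usual read/write bits. A minimal sketch of the expected masks follows; it assumes the standard definitions from linux/iommu.h and linux/dma-mapping.h, and since the helper is static to dma-mapping.c the snippet is illustrative only.)

#include <linux/dma-mapping.h>  /* DMA_ATTR_PRIVILEGED, enum dma_data_direction */
#include <linux/iommu.h>        /* IOMMU_READ, IOMMU_WRITE, IOMMU_PRIV */

/* Illustrative only: masks the new helper is expected to return. */
static void __maybe_unused dma_prot_example(void)
{
        /* Ordinary streaming mapping: read + write, no extra bits. */
        WARN_ON(__dma_info_to_prot(DMA_BIDIRECTIONAL, 0) !=
                (IOMMU_READ | IOMMU_WRITE));

        /* Privileged device-write mapping: IOMMU_PRIV is ORed in. */
        WARN_ON(__dma_info_to_prot(DMA_FROM_DEVICE, DMA_ATTR_PRIVILEGED) !=
                (IOMMU_WRITE | IOMMU_PRIV));
}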
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+                       unsigned long attrs)
 {
         struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
                 len = (j - i) << PAGE_SHIFT;
                 ret = iommu_map(mapping->domain, iova, phys, len,
-                                IOMMU_READ|IOMMU_WRITE);
+                                __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
                 if (ret < 0)
                         goto fail;
                 iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
-                                  dma_addr_t *handle, int coherent_flag)
+                                  dma_addr_t *handle, int coherent_flag,
+                                  unsigned long attrs)
 {
         struct page *page;
         void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
         if (!addr)
                 return NULL;
 
-        *handle = __iommu_create_mapping(dev, &page, size);
+        *handle = __iommu_create_mapping(dev, &page, size, attrs);
         if (*handle == DMA_ERROR_CODE)
                 goto err_mapping;
 
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 
         if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
                 return __iommu_alloc_simple(dev, size, gfp, handle,
-                                            coherent_flag);
+                                            coherent_flag, attrs);
 
         /*
          * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
         if (!pages)
                 return NULL;
 
-        *handle = __iommu_create_mapping(dev, pages, size);
+        *handle = __iommu_create_mapping(dev, pages, size, attrs);
         if (*handle == DMA_ERROR_CODE)
                 goto err_buffer;
 
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                          GFP_KERNEL);
 }
 
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
-        int prot;
-
-        switch (dir) {
-        case DMA_BIDIRECTIONAL:
-                prot = IOMMU_READ | IOMMU_WRITE;
-                break;
-        case DMA_TO_DEVICE:
-                prot = IOMMU_READ;
-                break;
-        case DMA_FROM_DEVICE:
-                prot = IOMMU_WRITE;
-                break;
-        default:
-                prot = 0;
-        }
-
-        return prot;
-}
-
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
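(Not part of the patch: for the attrs == 0 case the new helper returns exactly the mask the deleted __dma_direction_to_prot() produced, so this hunk is a pure consolidation; behaviour only changes for callers that start passing DMA_ATTR_PRIVILEGED. A quick sanity check, assuming the same headers as above:)

        /* Illustrative equivalence for the legacy (attrs == 0) case. */
        WARN_ON(__dma_info_to_prot(DMA_TO_DEVICE, 0) != IOMMU_READ);
        WARN_ON(__dma_info_to_prot(DMA_FROM_DEVICE, 0) != IOMMU_WRITE);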
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                         __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-                prot = __dma_direction_to_prot(dir);
+                prot = __dma_info_to_prot(dir, attrs);
 
                 ret = iommu_map(mapping->domain, iova, phys, len, prot);
                 if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
         if (dma_addr == DMA_ERROR_CODE)
                 return dma_addr;
 
-        prot = __dma_direction_to_prot(dir);
+        prot = __dma_info_to_prot(dir, attrs);
 
         ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
         if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
         if (dma_addr == DMA_ERROR_CODE)
                 return dma_addr;
 
-        prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+        prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
         ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
         if (ret < 0)
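(Usage sketch, not part of the patch: with this change a driver can request a privileged IOMMU mapping through the ordinary attrs-aware DMA API. The alloc_priv_buf() helper below is hypothetical and only illustrates passing DMA_ATTR_PRIVILEGED to dma_alloc_attrs().)

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: buffer whose IOMMU mapping carries IOMMU_PRIV. */
static void *alloc_priv_buf(struct device *dev, size_t size, dma_addr_t *iova)
{
        return dma_alloc_attrs(dev, size, iova, GFP_KERNEL,
                               DMA_ATTR_PRIVILEGED);
}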