 * Generic iommu implementation
 */

-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
-        return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-                dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
-                unsigned long attrs)
-{
-        return dev->archdata.iommu_bypass &&
-                (!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag,
                unsigned long attrs)
{
-        if (dma_iommu_alloc_bypass(dev))
-                return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                        dma_handle, dev->coherent_dma_mask, flag,
                        dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle,
                unsigned long attrs)
{
-        if (dma_iommu_alloc_bypass(dev))
-                dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-        else
-                iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
-                                dma_handle);
+        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                enum dma_data_direction direction,
                unsigned long attrs)
{
-        if (dma_iommu_map_bypass(dev, attrs))
-                return dma_direct_map_page(dev, page, offset, size, direction,
-                                attrs);
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                        size, dma_get_mask(dev), direction, attrs);
}
@@ -79,20 +53,15 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction direction,
                unsigned long attrs)
{
-        if (!dma_iommu_map_bypass(dev, attrs))
-                iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
-                                direction, attrs);
-        else
-                dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+                        attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                unsigned long attrs)
{
-        if (dma_iommu_map_bypass(dev, attrs))
-                return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                        dma_get_mask(dev), direction, attrs);
}
@@ -101,20 +70,18 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                unsigned long attrs)
{
-        if (!dma_iommu_map_bypass(dev, attrs))
-                ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                        direction, attrs);
-        else
-                dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
}

static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_controller *phb = pci_bus_to_host(pdev->bus);

-        return phb->controller_ops.iommu_bypass_supported &&
-                phb->controller_ops.iommu_bypass_supported(pdev, mask);
+        if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+                return false;
+        return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
-                dev->archdata.iommu_bypass = true;
+                dev->dma_ops_bypass = true;
                dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
                return 1;
        }
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
        }

        dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-        dev->archdata.iommu_bypass = false;
+        dev->dma_ops_bypass = false;
        return 1;
}

@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
        if (!tbl)
                return 0;

-        if (dev_is_pci(dev)) {
-                u64 bypass_mask = dma_direct_get_required_mask(dev);
-
-                if (dma_iommu_bypass_supported(dev, bypass_mask))
-                        return bypass_mask;
-        }
-
        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
        mask += mask - 1;

        return mask;
}

-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
-                size_t size, enum dma_data_direction dir)
-{
-        if (dma_iommu_alloc_bypass(dev))
-                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
-                size_t sz, enum dma_data_direction dir)
-{
-        if (dma_iommu_alloc_bypass(dev))
-                dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-        if (dma_iommu_alloc_bypass(dev))
-                dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-        if (dma_iommu_alloc_bypass(dev))
-                dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
-        .sync_single_for_cpu    = dma_iommu_sync_for_cpu,
-        .sync_single_for_device = dma_iommu_sync_for_device,
-        .sync_sg_for_cpu        = dma_iommu_sync_sg_for_cpu,
-        .sync_sg_for_device     = dma_iommu_sync_sg_for_device,
        .mmap                   = dma_common_mmap,
        .get_sgtable            = dma_common_get_sgtable,
};
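
Note: with this change the per-call bypass decision no longer lives in the powerpc ops above; it moves into the common DMA mapping layer, which consults dev->dma_ops_bypass before dispatching to dma_iommu_ops. The snippet below is a simplified sketch of that generic check (modelled on the dma_go_direct() helper in kernel/dma/mapping.c around the release that introduced CONFIG_DMA_OPS_BYPASS); it is illustrative only, not part of this diff, so treat the exact names and signatures as assumptions.

/*
 * Sketch of the generic bypass check this patch relies on (illustrative,
 * reconstructed from kernel/dma/mapping.c; not part of this diff).
 */
static inline bool dma_go_direct(struct device *dev, dma_addr_t mask,
                const struct dma_map_ops *ops)
{
        if (likely(!ops))
                return true;            /* no ops installed: always dma-direct */
#ifdef CONFIG_DMA_OPS_BYPASS
        if (dev->dma_ops_bypass)        /* set by dma_iommu_dma_supported() above */
                return min_not_zero(mask, dev->bus_dma_limit) >=
                                dma_direct_get_required_mask(dev);
#endif
        return false;
}

Callers such as dma_map_page_attrs() take the dma-direct path when this check succeeds and only fall back to ops->map_page() (dma_iommu_map_page() here) otherwise, which is why the per-op bypass branches and the sync_* stubs removed above are no longer needed.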