@@ -105,9 +105,35 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);
 
-static inline bool dma_is_direct(const struct dma_map_ops *ops)
+static bool dma_go_direct(struct device *dev, dma_addr_t mask,
+		const struct dma_map_ops *ops)
 {
-	return likely(!ops);
+	if (likely(!ops))
+		return true;
+#ifdef CONFIG_DMA_OPS_BYPASS
+	if (dev->dma_ops_bypass)
+		return min_not_zero(mask, dev->bus_dma_limit) >=
+			    dma_direct_get_required_mask(dev);
+#endif
+	return false;
+}
+
+
+/*
+ * Check if the device uses a direct mapping for streaming DMA operations.
+ * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
+ * enough.
+ */
+static inline bool dma_alloc_direct(struct device *dev,
+		const struct dma_map_ops *ops)
+{
+	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
+}
+
+static inline bool dma_map_direct(struct device *dev,
+		const struct dma_map_ops *ops)
+{
+	return dma_go_direct(dev, *dev->dma_mask, ops);
 }
 
 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
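Note on the new helper: when CONFIG_DMA_OPS_BYPASS is enabled and an IOMMU driver has set dev->dma_ops_bypass, dma_go_direct() reduces to a pure mask comparison. A minimal illustration of that comparison, with assumed example values (not taken from this patch):

	/*
	 * Illustration only: the check dma_go_direct() performs once
	 * dev->dma_ops_bypass is set, with assumed example values.
	 */
	u64 mask = DMA_BIT_MASK(64);	 /* *dev->dma_mask: device addresses 64 bits */
	u64 bus_limit = 0;		 /* dev->bus_dma_limit: no bridge restriction */
	u64 required = DMA_BIT_MASK(33); /* dma_direct_get_required_mask(): ~8 GiB of RAM */
	bool bypass = min_not_zero(mask, bus_limit) >= required; /* true: skip the IOMMU ops */

A 30-bit mask on the same machine would fail the comparison, and the mapping would keep going through the IOMMU ops.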
@@ -118,7 +144,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
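For context (caller side, not part of this patch): the common dma_map_single()/dma_unmap_single() wrappers funnel into dma_map_page_attrs()/dma_unmap_page_attrs(), so a driver doing a streaming mapping now hits the dma_map_direct() check above. A rough sketch, with dev, buf and len as placeholder names:

	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;			/* the mapping failed */

	/* ... hand "dma" to the hardware and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

Whether this took the direct path or ops->map_page() is invisible to the caller; only the per-device bypass flag and masks decide.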
@@ -134,7 +160,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
@@ -153,7 +179,7 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
 	int ents;
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
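The scatterlist path is the same story. A rough caller-side sketch (sgl and nents are placeholders; a return value of 0 from dma_map_sg() means failure, and dma_unmap_sg() must be passed the original nents, not the returned count):

	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;			/* nothing was mapped */

	/* ... program the device with the "mapped" segments ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);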
@@ -172,7 +198,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
 	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
@@ -191,7 +217,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 	else if (ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
@@ -207,7 +233,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (!dma_is_direct(ops) && ops->unmap_resource)
+	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -219,7 +245,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
@@ -233,7 +259,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_sync_single_for_device(dev, addr, size, dir);
 	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
@@ -247,7 +273,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
@@ -261,7 +287,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
@@ -302,7 +328,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
@@ -372,7 +398,7 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
@@ -397,7 +423,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
@@ -410,7 +436,7 @@ u64 dma_get_required_mask(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_required_mask(dev);
 	if (ops->get_required_mask)
 		return ops->get_required_mask(dev);
@@ -441,7 +467,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	else if (ops->alloc)
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
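Note that coherent allocations use dma_alloc_direct(), which compares dev->coherent_dma_mask rather than the streaming mask, so a device can bypass the IOMMU for streaming DMA while its dma_alloc_coherent() buffers still come from the IOMMU implementation (or vice versa). A caller-side sketch with placeholder names:

	void *cpu;
	dma_addr_t dma;

	cpu = dma_alloc_coherent(dev, ring_size, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... use cpu (kernel view) and dma (device view) for a descriptor ring ... */

	dma_free_coherent(dev, ring_size, cpu, dma);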
@@ -473,7 +499,7 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
 	else if (ops->free)
 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
@@ -484,7 +510,11 @@ int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	/*
+	 * ->dma_supported sets the bypass flag, so we must always call
+	 * into the method here unless the device is truly direct mapped.
+	 */
+	if (!ops)
 		return dma_direct_supported(dev, mask);
 	if (!ops->dma_supported)
 		return 1;
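The comment above is the key subtlety: an IOMMU implementation may use its ->dma_supported method to decide, per device and per mask, whether to turn the bypass flag on. A hypothetical hook (name and policy are assumptions, not part of this patch; it requires CONFIG_DMA_OPS_BYPASS so that struct device has the field) could look like:

	static int example_iommu_dma_supported(struct device *dev, u64 mask)
	{
		/* Wide enough to reach all memory directly? Then bypass. */
		if (min_not_zero(mask, dev->bus_dma_limit) >=
		    dma_direct_get_required_mask(dev))
			dev->dma_ops_bypass = true;	/* streaming DMA goes direct */
		else
			dev->dma_ops_bypass = false;	/* keep translating through the IOMMU */
		return 1;				/* the mask itself is usable */
	}

Because of this side effect, dma_supported() above only short-circuits to dma_direct_supported() when there are no ops at all.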
@@ -540,7 +570,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		arch_dma_cache_sync(dev, vaddr, size, dir);
 	else if (ops->cache_sync)
 		ops->cache_sync(dev, vaddr, size, dir);
@@ -552,7 +582,7 @@ size_t dma_max_mapping_size(struct device *dev)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	size_t size = SIZE_MAX;
 
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		size = dma_direct_max_mapping_size(dev);
 	else if (ops && ops->max_mapping_size)
 		size = ops->max_mapping_size(dev);
@@ -565,7 +595,7 @@ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
 	return ops->sync_single_for_cpu || ops->sync_single_for_device;
 }