@@ -188,73 +188,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_DECLARE_COHERENT */

-static inline bool dma_is_direct(const struct dma_map_ops *ops)
-{
-	return likely(!ops);
-}
-
-/*
- * All the dma_direct_* declarations are here just for the indirect call bypass,
- * and must not be used directly by drivers!
- */
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
-    defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_SWIOTLB)
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void dma_direct_unmap_sg(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-}
-static inline void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-size_t dma_direct_max_mapping_size(struct device *dev);
-
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>

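The dma_is_direct() helper and the dma_direct_* prototypes deleted above existed, as the removed comment says, only for the indirect call bypass: when a device has no dma_map_ops table, the inline wrappers call the direct-mapping functions statically instead of going through ops->fn(). A minimal standalone sketch of that idiom, with hypothetical names standing in for the kernel types:

#include <stdio.h>

/* Hypothetical stand-in for struct dma_map_ops. */
struct ops {
        int (*map)(int arg);
};

/* Stand-in for the dma_direct_* fast path. */
static int direct_map(int arg)
{
        return arg + 1;
}

static int iommu_map(int arg)
{
        return arg + 1000;
}

/*
 * The bypass idiom: a NULL ops pointer means "direct mapping", so the
 * common case is an ordinary call the compiler can inline, rather than
 * a retpoline-expensive indirect call through the ops table.
 */
static int do_map(const struct ops *ops, int arg)
{
        if (!ops)
                return direct_map(arg);
        return ops->map(arg);
}

int main(void)
{
        const struct ops iommu = { .map = iommu_map };

        printf("%d\n", do_map(NULL, 1));        /* direct path: 2 */
        printf("%d\n", do_map(&iommu, 1));      /* indirect path: 1001 */
        return 0;
}
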
@@ -271,164 +204,6 @@ static inline void set_dma_ops(struct device *dev,
 	dev->dma_ops = dma_ops;
 }

-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-		struct page *page, size_t offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	else
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	debug_dma_map_page(dev, page, offset, size, dir, addr);
-
-	return addr;
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_unmap_page(dev, addr, size, dir, attrs);
-	else if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir);
-}
-
-/*
- * dma_map_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	int ents;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
-	else
-		ents = ops->map_sg(dev, sg, nents, dir, attrs);
-	BUG_ON(ents < 0);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_is_direct(ops))
-		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
-	else if (ops->unmap_sg)
-		ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr = DMA_MAPPING_ERROR;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	/* Don't allow RAM to be mapped */
-	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
-		return DMA_MAPPING_ERROR;
-
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
-	else if (ops->map_resource)
-		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
-	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-	return addr;
-}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (!dma_is_direct(ops) && ops->unmap_resource)
-		ops->unmap_resource(dev, addr, size, dir, attrs);
-	debug_dma_unmap_resource(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-	else if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_single_for_device(dev, addr, size, dir);
-	else if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
-	else if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
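Every wrapper deleted above has the same shape: BUG_ON a bad direction, take either the direct path or the ops->fn() indirect call, then report to the DMA debug machinery. Only their bodies move out of line (see the declarations added in the next hunk), so call sites keep working unchanged. In particular, the single-buffer helpers layer on top of dma_map_page_attrs(), roughly along these lines (a simplified sketch, not the verbatim header):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* A kernel virtual address is just a page plus an offset into it. */
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}
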
@@ -439,6 +214,28 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }

+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
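
Since only the definitions move out of line, nothing changes for callers. For reference, a typical streaming-DMA receive sequence against this API; a sketch that assumes a valid dev and page, with process_data() as a hypothetical consumer:

        dma_addr_t handle;

        /* Map one page for the device to write into. */
        handle = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                        DMA_FROM_DEVICE, 0);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... point the hardware at "handle" and wait for completion ... */

        /* Hand ownership back to the CPU before touching the buffer. */
        dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
        process_data(page_address(page));       /* hypothetical consumer */

        dma_unmap_page_attrs(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE, 0);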