@@ -117,14 +117,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
-		enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -147,7 +139,6 @@ u64 dma_get_required_mask(struct device *dev);
 bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
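For context on the dma_need_sync() declaration moved by this hunk: it reports whether dma_sync_*() calls actually do anything for a given mapping, so a driver can cache the answer once at setup time and branch around the syncs on its hot path. A minimal sketch of that pattern, assuming a hypothetical rx_ring structure (none of these names come from this patch):

	#include <linux/dma-mapping.h>

	/* Hypothetical receive ring; only the cached flag matters here. */
	struct rx_ring {
		struct device	*dev;
		dma_addr_t	buf_dma;
		size_t		buf_len;
		bool		needs_sync;	/* cached dma_need_sync() result */
	};

	static void rx_ring_init_sync(struct rx_ring *ring)
	{
		ring->needs_sync = dma_need_sync(ring->dev, ring->buf_dma);
	}

	static void rx_ring_sync_for_cpu(struct rx_ring *ring)
	{
		/* Skip the sync call entirely when it would be a no-op. */
		if (ring->needs_sync)
			dma_sync_single_for_cpu(ring->dev, ring->buf_dma,
						ring->buf_len, DMA_FROM_DEVICE);
	}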
@@ -195,22 +186,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return -ENOMEM;
@@ -277,10 +252,6 @@ static inline size_t dma_opt_mapping_size(struct device *dev)
 {
 	return 0;
 }
-static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
@@ -310,6 +281,39 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
 }
 #endif /* CONFIG_HAS_DMA */
 
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
 struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
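The sync helpers keep their exact signatures; only their home moves into the new conditional block. A typical streaming-DMA round trip that exercises them might look like the sketch below (dev, buf and len are illustrative placeholders, not taken from this patch):

	#include <linux/dma-mapping.h>

	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... hand 'dma' to the hardware and wait for completion ... */

		/* Make the device's writes visible before the CPU reads buf. */
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
		/* ... process buf ... */
		/* Give the buffer back to the device for reuse. */
		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

		/* ... once the buffer is retired: */
		dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
		return 0;
	}

With CONFIG_DMA_NEED_SYNC unset, the two dma_sync_single_*() calls above resolve to the empty static inlines from the #else branch and disappear from the generated code, which is the point of making the block conditional.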