
Commit fe7514b

alobakin authored and Christoph Hellwig committed
dma: compile-out DMA sync op calls when not used
Some platforms do have DMA, but DMA there is always direct and coherent. Currently, even on such platforms, DMA sync operations are compiled and called.

Add a new hidden Kconfig symbol, DMA_NEED_SYNC, and set it only when sync operations are needed, or when DMA ops, swiotlb, or DMA debug is enabled. Compile the global dma_sync_*() functions and dma_need_sync() only when it is set; otherwise, provide empty inline stubs.

This change allows for future optimizations of DMA sync calls depending on runtime conditions.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 2650073 commit fe7514b
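
To make the compile-out concrete, here is a minimal, hypothetical caller (function name invented for illustration; not part of this commit). When CONFIG_DMA_NEED_SYNC is unset, dma_sync_single_for_cpu() resolves to the new empty static inline in dma-mapping.h, so the compiler drops the call entirely:

/*
 * Hypothetical caller, for illustration only (not from this commit).
 * With CONFIG_DMA_NEED_SYNC unset, dma_sync_single_for_cpu() is an
 * empty static inline, so this function compiles down to just the
 * buffer handling.
 */
#include <linux/dma-mapping.h>

static void example_rx_complete(struct device *dev, dma_addr_t buf,
				size_t len)
{
	dma_sync_single_for_cpu(dev, buf, len, DMA_FROM_DEVICE);
	/* ... pass the buffer up the stack ... */
}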

File tree

3 files changed (+50, -39 lines)


include/linux/dma-mapping.h

Lines changed: 33 additions & 29 deletions
@@ -117,14 +117,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
-		enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -147,7 +139,6 @@ u64 dma_get_required_mask(struct device *dev);
 bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
@@ -195,22 +186,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return -ENOMEM;
@@ -277,10 +252,6 @@ static inline size_t dma_opt_mapping_size(struct device *dev)
 {
 	return 0;
 }
-static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
@@ -310,6 +281,39 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
 }
 #endif /* CONFIG_HAS_DMA */
 
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
 struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
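
The new block follows the common kernel pattern of pairing real declarations with empty inline stubs behind a config symbol, so callers never need #ifdefs of their own. A stripped-down sketch of the pattern (names invented, not from this commit):

#ifdef CONFIG_SOME_FEATURE		/* stand-in for CONFIG_DMA_NEED_SYNC */
void feature_sync(struct device *dev);	/* real version, defined in a .c file */
#else
static inline void feature_sync(struct device *dev)
{
	/* no-op: the compiler eliminates calls to this stub */
}
#endif

Note that the stubbed dma_need_sync() returns false, which is consistent with the sync calls themselves being no-ops on such configurations.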

kernel/dma/Kconfig

Lines changed: 5 additions & 0 deletions
@@ -107,6 +107,11 @@ config DMA_BOUNCE_UNALIGNED_KMALLOC
 	bool
 	depends on SWIOTLB
 
+config DMA_NEED_SYNC
+	def_bool ARCH_HAS_SYNC_DMA_FOR_DEVICE || ARCH_HAS_SYNC_DMA_FOR_CPU || \
+		 ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || DMA_OPS || \
+		 SWIOTLB
+
 config DMA_RESTRICTED_POOL
 	bool "DMA Restricted Pool"
 	depends on OF && OF_RESERVED_MEM && SWIOTLB
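
def_bool defines a hidden symbol with no user prompt: DMA_NEED_SYNC never appears in menuconfig and is switched on automatically whenever any of the listed features is enabled. Besides the #ifdef/#else split used in the header above, C code can also test such a symbol inside a function with IS_ENABLED(), which expands to a compile-time 0 or 1. A sketch, with an invented function name, not code from this commit:

#include <linux/dma-mapping.h>
#include <linux/kconfig.h>

/*
 * Sketch only, not from this commit: IS_ENABLED() is a compile-time
 * constant, so the branch below is discarded entirely on platforms
 * where DMA_NEED_SYNC is unset.
 */
static void example_maybe_sync(struct device *dev, dma_addr_t addr, size_t len)
{
	if (IS_ENABLED(CONFIG_DMA_NEED_SYNC))
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}

In this particular case an explicit IS_ENABLED() check would be redundant, since the empty stub already compiles to nothing; the patch relies on the stub mechanism instead.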

kernel/dma/mapping.c

Lines changed: 12 additions & 10 deletions
@@ -329,6 +329,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 }
 EXPORT_SYMBOL(dma_unmap_resource);
 
+#ifdef CONFIG_DMA_NEED_SYNC
 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir)
 {
@@ -385,6 +386,17 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_map_direct(dev, ops))
+		return dma_direct_need_sync(dev, dma_addr);
+	return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+#endif /* CONFIG_DMA_NEED_SYNC */
+
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the
@@ -841,16 +853,6 @@ size_t dma_opt_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
 
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (dma_map_direct(dev, ops))
-		return dma_direct_need_sync(dev, dma_addr);
-	return ops->sync_single_for_cpu || ops->sync_single_for_device;
-}
-EXPORT_SYMBOL_GPL(dma_need_sync);
-
 unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
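
The "future optimizations of DMA sync calls depending on runtime conditions" mentioned in the commit message point at callers caching the dma_need_sync() result. A hedged sketch of that driver-side pattern (all names invented, not part of this commit): probe one mapping at setup time and skip the per-buffer sync on the hot path when the platform never needs one.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver-side use of dma_need_sync(), for illustration
 * only. The result is cached at ring setup; the per-buffer hot path
 * then avoids the sync call entirely when no syncing is needed.
 */
struct example_rx_ring {
	struct device	*dev;
	bool		needs_sync;
};

static void example_ring_init(struct example_rx_ring *ring,
			      dma_addr_t first_buf)
{
	ring->needs_sync = dma_need_sync(ring->dev, first_buf);
}

static void example_buf_to_cpu(struct example_rx_ring *ring,
			       dma_addr_t buf, size_t len)
{
	if (ring->needs_sync)
		dma_sync_single_for_cpu(ring->dev, buf, len,
					DMA_FROM_DEVICE);
}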
