Skip to content

Commit faf4ef8

Browse files
author
Christoph Hellwig
committed
dma-direct: add support for dma_coherent_default_memory
Add an option to allocate uncached memory for dma_alloc_coherent from the global dma_coherent_default_memory. This will allow moving arm-nommu (and eventually other platforms) to use generic code for allocating uncached memory from a pre-populated pool. Note that this is a different pool from the one that platforms that can remap at runtime use for GFP_ATOMIC allocations for now, although there might be opportunities to eventually end up with a common codebase for the two use cases. Signed-off-by: Christoph Hellwig <[email protected]> Tested-by: Dillon Min <[email protected]>
1 parent 2a047e0 commit faf4ef8

File tree

2 files changed

+19
-0
lines changed

2 files changed

+19
-0
lines changed

kernel/dma/Kconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,10 @@ config DMA_COHERENT_POOL
 	select GENERIC_ALLOCATOR
 	bool
 
+config DMA_GLOBAL_POOL
+	select DMA_DECLARE_COHERENT
+	bool
+
 config DMA_REMAP
 	bool
 	depends on MMU

kernel/dma/direct.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev))
+		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev)) {
+		if (!dma_release_from_global_coherent(page_order, cpu_addr))
+			WARN_ON_ONCE(1);
+		return;
+	}
+
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
 
 	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
 		return -ENXIO;

0 commit comments

Comments
 (0)