
Commit 31b089b

Author: Christoph Hellwig
ARM/nommu: use the generic dma-direct code for non-coherent devices
Select the right options to just use the generic dma-direct code instead of reimplementing it.

Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Dillon Min <[email protected]>
1 parent faf4ef8 commit 31b089b
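
For context: with the Kconfig options selected below, the generic code in kernel/dma/direct.c drives the two arch_sync_dma_* hooks that this patch keeps whenever a streaming mapping is made for a non-coherent device. The following is a simplified sketch of that call path, paraphrased from kernel/dma/direct.{c,h} (the _sketch name is ours; swiotlb and error handling are elided), not the verbatim kernel source:

	/*
	 * Simplified sketch of the generic dma-direct map path. For a
	 * non-coherent device it performs the cache clean that the removed
	 * arm_nommu_dma_map_page() used to open-code.
	 */
	static dma_addr_t dma_direct_map_page_sketch(struct device *dev,
			struct page *page, unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		phys_addr_t phys = page_to_phys(page) + offset;

		if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(phys, size, dir);

		return phys_to_dma(dev, phys);
	}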

File tree: 2 files changed, +9 −169 lines


arch/arm/Kconfig

Lines changed: 3 additions & 2 deletions
@@ -18,8 +18,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -44,6 +44,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
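
For context: DMA_GLOBAL_POOL lets the generic allocator satisfy coherent allocations from the global pool declared via the reserved-memory binding, which is what the removed arm_nommu_dma_alloc() did by calling dma_alloc_from_global_coherent() directly. Nothing changes for drivers; a minimal consumer sketch (the function name is hypothetical, the dma_alloc_coherent()/dma_free_coherent() API is real):

	#include <linux/dma-mapping.h>

	/* Hypothetical consumer, for illustration only: on !MMU ARM this
	 * allocation is now served from the global coherent pool by the
	 * generic dma-direct code rather than by arm_nommu_dma_alloc(). */
	static int example_get_dma_buffer(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *cpu_addr;

		cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

		dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
		return 0;
	}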

arch/arm/mm/dma-mapping-nommu.c

Lines changed: 6 additions & 167 deletions
@@ -5,78 +5,16 @@
  * Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
 #include <asm/cp15.h>
 
 #include "dma.h"
 
-/*
- * The generic direct mapping code is used if
- *  - MMU/MPU is off
- *  - cpu is v7m w/o cache support
- *  - device is coherent
- * otherwise arm_nommu_dma_ops is used.
- *
- * arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- * [1] on how to declare such memory).
- *
- * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
 
@@ -86,111 +24,15 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 	outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 		dmac_unmap_area(__va(paddr), size, dir);
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-					     int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-					  int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc			= arm_nommu_dma_alloc,
-	.free			= arm_nommu_dma_free,
-	.alloc_pages		= dma_direct_alloc_pages,
-	.free_pages		= dma_direct_free_pages,
-	.mmap			= arm_nommu_dma_mmap,
-	.map_page		= arm_nommu_dma_map_page,
-	.unmap_page		= arm_nommu_dma_unmap_page,
-	.map_sg			= arm_nommu_dma_map_sg,
-	.unmap_sg		= arm_nommu_dma_unmap_sg,
-	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
 		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
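
For context: after this patch the file only provides the two cache-maintenance hooks plus arch_setup_dma_ops(); the map/unmap/sync plumbing comes from the generic dma-direct code. A hypothetical driver-side sequence (function and buffer names are illustrative; the dma_map_single() API is real) showing when each hook fires:

	#include <linux/dma-mapping.h>

	/* Hypothetical example, for illustration only. */
	static void example_stream_to_device(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr;

		/* For a non-coherent device this ends up in arch_sync_dma_for_device(),
		 * i.e. dmac_map_area() + outer_clean_range() above. */
		addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			return;

		/* ... device DMA runs here ... */

		/* Unmap ends up in arch_sync_dma_for_cpu(); for DMA_TO_DEVICE that
		 * is a no-op above, invalidation happens for DMA_FROM_DEVICE. */
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	}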
