  * Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
 #include <asm/cp15.h>
 
 #include "dma.h"
 
-/*
- * The generic direct mapping code is used if
- *   - MMU/MPU is off
- *   - cpu is v7m w/o cache support
- *   - device is coherent
- * otherwise arm_nommu_dma_ops is used.
- *
- * arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- * [1] on how to declare such memory).
- *
- * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
 
@@ -86,111 +24,15 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 		outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 		dmac_unmap_area(__va(paddr), size, dir);
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc			= arm_nommu_dma_alloc,
-	.free			= arm_nommu_dma_free,
-	.alloc_pages		= dma_direct_alloc_pages,
-	.free_pages		= dma_direct_free_pages,
-	.mmap			= arm_nommu_dma_mmap,
-	.map_page		= arm_nommu_dma_map_page,
-	.unmap_page		= arm_nommu_dma_unmap_page,
-	.map_sg			= arm_nommu_dma_map_sg,
-	.unmap_sg		= arm_nommu_dma_unmap_sg,
-	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
 		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
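For context, here is a minimal standalone sketch (not kernel source; the types, struct device model, and the sketch_* helpers below are simplified stand-ins) of the flow this change relies on: with arm_nommu_dma_ops removed, the generic dma-direct path is expected to call arch_sync_dma_for_device() before a non-coherent device accesses a buffer and arch_sync_dma_for_cpu() when ownership returns to the CPU, while devices marked dma_coherent skip both hooks.

/*
 * Illustrative sketch only -- simplified stand-ins, not the kernel's
 * dma-direct implementation.  It mirrors the call pattern the diff
 * above depends on: the two remaining arch hooks are driven by the
 * generic map/unmap path whenever the device is not DMA coherent.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t phys_addr_t;
typedef uintptr_t dma_addr_t;

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

struct device { bool dma_coherent; };	/* stand-in for the real struct device */

/* Stand-ins for the hooks kept by this change (real bodies are in the diff). */
static void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
				     enum dma_data_direction dir)
{
	printf("sync for device: %#lx +%zu dir=%d (clean/invalidate caches)\n",
	       (unsigned long)paddr, size, (int)dir);
}

static void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		printf("sync for cpu: %#lx +%zu dir=%d (invalidate caches)\n",
		       (unsigned long)paddr, size, (int)dir);
}

/* Rough shape of a dma-direct streaming mapping on a !MMU system: 1:1. */
static dma_addr_t sketch_map_single(struct device *dev, phys_addr_t paddr,
				    size_t size, enum dma_data_direction dir)
{
	if (!dev->dma_coherent)
		arch_sync_dma_for_device(paddr, size, dir);
	return (dma_addr_t)paddr;
}

static void sketch_unmap_single(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	if (!dev->dma_coherent)
		arch_sync_dma_for_cpu((phys_addr_t)handle, size, dir);
}

int main(void)
{
	struct device noncoherent = { .dma_coherent = false };
	static char buf[64];

	dma_addr_t h = sketch_map_single(&noncoherent, (phys_addr_t)buf,
					 sizeof(buf), DMA_FROM_DEVICE);
	/* ... device DMA into the buffer would happen here ... */
	sketch_unmap_single(&noncoherent, h, sizeof(buf), DMA_FROM_DEVICE);
	return 0;
}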