@@ -3,7 +3,9 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2020 Google LLC
  */
+#include <linux/cma.h>
 #include <linux/debugfs.h>
+#include <linux/dma-contiguous.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/init.h>
@@ -55,6 +57,29 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
 		pool_size_kernel += size;
 }
 
+static bool cma_in_zone(gfp_t gfp)
+{
+	unsigned long size;
+	phys_addr_t end;
+	struct cma *cma;
+
+	cma = dev_get_cma_area(NULL);
+	if (!cma)
+		return false;
+
+	size = cma_get_size(cma);
+	if (!size)
+		return false;
+
+	/* CMA can't cross zone boundaries, see cma_activate_area() */
+	end = cma_get_base(cma) + size - 1;
+	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+		return end <= DMA_BIT_MASK(zone_dma_bits);
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+		return end <= DMA_BIT_MASK(32);
+	return true;
+}
+
 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 			      gfp_t gfp)
 {
@@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
 	do {
 		pool_size = 1 << (PAGE_SHIFT + order);
-		page = alloc_pages(gfp, order);
+		if (cma_in_zone(gfp))
+			page = dma_alloc_from_contiguous(NULL, 1 << order,
+							 order, false);
+		if (!page)
+			page = alloc_pages(gfp, order);
 	} while (!page && order-- > 0);
 	if (!page)
 		goto out;
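The new cma_in_zone() helper only lets atomic_pool_expand() draw from CMA when, for a zone-restricted request, the whole CMA area fits below the zone limit; since CMA cannot cross zone boundaries, checking the area's last byte is enough. A minimal user-space sketch of that containment test, using a hypothetical CMA base and size and a mirrored DMA_BIT_MASK() (illustration only, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's DMA_BIT_MASK(n): the highest address
 * reachable through an n-bit DMA mask. */
#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

int main(void)
{
	/* Hypothetical CMA area: 64 MiB based at 3.5 GiB. */
	uint64_t base = 0xE0000000ULL;
	uint64_t size = 64ULL << 20;
	uint64_t end  = base + size - 1;

	/* Same shape as cma_in_zone(): the area's last byte must lie
	 * below the zone limit, here the 32-bit ZONE_DMA32 boundary. */
	printf("CMA usable for a GFP_DMA32 pool: %s\n",
	       end <= DMA_BIT_MASK(32) ? "yes" : "no");
	return 0;
}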
@@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
-	u64 phys_mask;
-	gfp_t gfp;
-
-	gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-					  &phys_mask);
-	if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+	if (prev == NULL) {
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+			return atomic_pool_dma32;
+		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+			return atomic_pool_dma;
+		return atomic_pool_kernel;
+	}
+	if (prev == atomic_pool_kernel)
+		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+	if (prev == atomic_pool_dma32)
 		return atomic_pool_dma;
-	if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
-		return atomic_pool_dma32;
-	return atomic_pool_kernel;
+	return NULL;
 }
 
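The rewrite above folds the old dma_guess_pool_from_device()/dma_get_safer_pool() pair into a single iterator: a NULL prev starts the walk at the widest pool the GFP flags allow, each later call maps the pool just tried to the next more restrictive one, and NULL ends the walk. A stand-alone sketch of the same idiom with the pools stubbed out as strings (illustration only, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for atomic_pool_kernel/_dma32/_dma. */
static const char *pool_kernel = "kernel";
static const char *pool_dma32  = "dma32";
static const char *pool_dma    = "dma";

/* Same shape as dma_guess_pool(): NULL starts the walk, each pool
 * yields the next more restrictive one, NULL terminates. */
static const char *guess_pool(const char *prev)
{
	if (!prev)
		return pool_kernel;	/* no zone flags in this sketch */
	if (prev == pool_kernel)
		return pool_dma32;
	if (prev == pool_dma32)
		return pool_dma;
	return NULL;
}

int main(void)
{
	const char *pool = NULL;

	/* The loop idiom dma_alloc_from_pool() uses below. */
	while ((pool = guess_pool(pool)))
		printf("trying pool: %s\n", pool);
	return 0;
}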
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+		struct gen_pool *pool, void **cpu_addr,
+		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
-	if (bad_pool == atomic_pool_kernel)
-		return atomic_pool_dma32 ?: atomic_pool_dma;
+	unsigned long addr;
+	phys_addr_t phys;
 
-	if (bad_pool == atomic_pool_dma32)
-		return atomic_pool_dma;
+	addr = gen_pool_alloc(pool, size);
+	if (!addr)
+		return NULL;
 
-	return NULL;
-}
+	phys = gen_pool_virt_to_phys(pool, addr);
+	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+		gen_pool_free(pool, addr, size);
+		return NULL;
+	}
 
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
-					      struct gen_pool *bad_pool)
-{
-	if (bad_pool)
-		return dma_get_safer_pool(bad_pool);
+	if (gen_pool_avail(pool) < atomic_pool_size)
+		schedule_work(&atomic_pool_work);
 
-	return dma_guess_pool_from_device(dev);
+	*cpu_addr = (void *)addr;
+	memset(*cpu_addr, 0, size);
+	return pfn_to_page(__phys_to_pfn(phys));
 }
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-			  struct page **ret_page, gfp_t flags)
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+		void **cpu_addr, gfp_t gfp,
+		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
 	struct gen_pool *pool = NULL;
-	unsigned long val = 0;
-	void *ptr = NULL;
-	phys_addr_t phys;
-
-	while (1) {
-		pool = dma_guess_pool(dev, pool);
-		if (!pool) {
-			WARN(1, "Failed to get suitable pool for %s\n",
-			     dev_name(dev));
-			break;
-		}
-
-		val = gen_pool_alloc(pool, size);
-		if (!val)
-			continue;
-
-		phys = gen_pool_virt_to_phys(pool, val);
-		if (dma_coherent_ok(dev, phys, size))
-			break;
-
-		gen_pool_free(pool, val, size);
-		val = 0;
-	}
-
-
-	if (val) {
-		*ret_page = pfn_to_page(__phys_to_pfn(phys));
-		ptr = (void *)val;
-		memset(ptr, 0, size);
+	struct page *page;
 
-		if (gen_pool_avail(pool) < atomic_pool_size)
-			schedule_work(&atomic_pool_work);
+	while ((pool = dma_guess_pool(pool, gfp))) {
+		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+					     phys_addr_ok);
+		if (page)
+			return page;
 	}
 
-	return ptr;
+	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+	return NULL;
 }
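dma_alloc_from_pool() now returns the struct page, hands the virtual address back through *cpu_addr, and takes a caller-supplied phys_addr_ok() predicate instead of hard-coding dma_coherent_ok(), so IOMMU callers can apply their own addressability check. A hedged sketch of a direct-mapping caller, assuming it passes dma_coherent_ok() the way dma-direct does; the wrapper name is made up for illustration, and dma_coherent_ok()/phys_to_dma() live outside this diff:

/* Hypothetical caller: allocate an atomic coherent buffer and derive
 * its DMA address.  dma_coherent_ok() and phys_to_dma() are assumed
 * from kernel/dma/direct.c, not defined in this diff. */
static void *alloc_atomic_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	void *cpu_addr;

	page = dma_alloc_from_pool(dev, size, &cpu_addr, gfp,
				   dma_coherent_ok);
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return cpu_addr;
}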
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
 	struct gen_pool *pool = NULL;
 
-	while (1) {
-		pool = dma_guess_pool(dev, pool);
-		if (!pool)
-			return false;
-
-		if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
-			gen_pool_free(pool, (unsigned long)start, size);
-			return true;
-		}
+	while ((pool = dma_guess_pool(pool, 0))) {
+		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+			continue;
+		gen_pool_free(pool, (unsigned long)start, size);
+		return true;
 	}
+
+	return false;
 }
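dma_free_from_pool() walks the same pool chain (a 0 gfp argument starts at the kernel pool and tries them all) and reports whether the address belonged to any atomic pool, so the caller can fall back to its normal free path otherwise. A minimal sketch of that caller-side pattern (the wrapper and its fallback are placeholders, not from this diff):

/* Hypothetical free path: try the atomic pools first, then fall back. */
static void free_coherent_sketch(struct device *dev, void *vaddr,
				 size_t size)
{
	if (dma_free_from_pool(dev, vaddr, size))
		return;		/* came from an atomic pool, done */

	/* ... otherwise free through the regular allocator path ... */
}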