@@ -219,49 +219,19 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
+static int __dma_supported(struct device *dev, u64 mask)
 {
 	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
 	/*
 	 * Translate the device's DMA mask to a PFN limit. This
 	 * PFN number includes the page which we can DMA to.
 	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
+	if (dma_to_pfn(dev, mask) < max_dma_pfn)
 		return 0;
-	}
-
 	return 1;
 }
 
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
-}
-
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
 {
 	/*
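Reconstructed from the hunk above (context plus "+" lines only, nothing new), __dma_supported() now reduces to a pure PFN range check against the DMA zone limit:

static int __dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit. This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn)
		return 0;
	return 1;
}

Dropping the warn path is what lets the callers further down pass just two arguments.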
@@ -688,7 +658,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
 			 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = dev->coherent_dma_mask;
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
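Read together with the next hunk, the opening of __dma_alloc() after this commit looks as follows. This is a sketch reconstructed from the two hunks' context lines, with unchanged code elided and the trailing return NULL assumed rather than shown in the diff:

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = dev->coherent_dma_mask;	/* read directly, no helper call */
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;

	/* ... unchanged allocation setup elided ... */

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;	/* assumed continuation, not in this diff */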
@@ -712,9 +682,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	}
 #endif
 
-	if (!mask)
-		return NULL;
-
 	buf = kzalloc(sizeof(*buf),
 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
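With the !mask bail-out gone, the arch allocator no longer validates the coherent mask itself. A minimal sketch of the assumption behind that removal, namely that the generic DMA entry point of this kernel generation already screens the mask before the arch hook runs (paraphrased from kernel/dma/mapping.c, not part of this diff):

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag, unsigned long attrs)
{
	/* Warns once if a driver never set its coherent mask, which is
	 * why the ARM allocator's own !mask check became redundant. */
	WARN_ON_ONCE(!dev->coherent_dma_mask);
	/* ... dispatch to the arch ops, i.e. __dma_alloc() on ARM ... */
}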
@@ -1095,7 +1062,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int arm_dma_supported(struct device *dev, u64 mask)
 {
-	return __dma_supported(dev, mask, false);
+	return __dma_supported(dev, mask);
 }
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
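arm_dma_supported() serves as the .dma_supported hook of the ARM DMA ops, so the usual way it gets exercised is a driver setting its masks at probe time. A hypothetical probe-time snippet using standard kernel API (nothing here is part of this diff; pdev is an illustrative platform device):

/* dma_set_mask_and_coherent() funnels into dma_supported(), which on
 * ARM lands in arm_dma_supported() and thus __dma_supported(). */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	return -EIO;	/* mask cannot cover the platform's DMA zone */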