@@ -584,6 +584,22 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+static const struct bus_dma_region *dma_find_range(struct device *dev,
+						   unsigned long start_pfn)
+{
+	const struct bus_dma_region *m;
+
+	for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+		unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+		if (start_pfn >= cpu_start_pfn &&
+		    start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
+			return m;
+	}
+
+	return NULL;
+}
+
 /*
  * To check whether all ram resource ranges are covered by dma range map
  * Returns 0 when further check is needed
@@ -593,20 +609,12 @@ static int check_ram_in_range_map(unsigned long start_pfn,
 				  unsigned long nr_pages, void *data)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
-	const struct bus_dma_region *bdr = NULL;
-	const struct bus_dma_region *m;
 	struct device *dev = data;
 
 	while (start_pfn < end_pfn) {
-		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
-			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+		const struct bus_dma_region *bdr;
 
-			if (start_pfn >= cpu_start_pfn &&
-			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
-				bdr = m;
-				break;
-			}
-		}
+		bdr = dma_find_range(dev, start_pfn);
 		if (!bdr)
 			return 1;
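For context, a minimal standalone sketch (not kernel code) of the lookup the new helper performs: dma_range_map is treated as a zero-size-terminated array of bus_dma_region entries, and the helper returns the entry whose CPU range contains the given page frame number. The struct layout shown here is simplified and the example map values are hypothetical, chosen only to illustrate the loop in the patch.

/*
 * Userspace sketch of a dma_find_range()-style lookup.
 * Assumes a simplified bus_dma_region layout and a made-up range map.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

struct bus_dma_region {
	uint64_t cpu_start;	/* CPU physical address of the range */
	uint64_t dma_start;	/* device-visible (DMA) address */
	uint64_t size;		/* size in bytes; 0 terminates the table */
};

static const struct bus_dma_region *
find_range(const struct bus_dma_region *map, unsigned long start_pfn)
{
	const struct bus_dma_region *m;

	for (m = map; PFN_DOWN(m->size); m++) {
		unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

		if (start_pfn >= cpu_start_pfn &&
		    start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
			return m;	/* PFN falls inside this range */
	}

	return NULL;			/* PFN not covered by any range */
}

int main(void)
{
	/* hypothetical two-range map, terminated by a zero-size entry */
	static const struct bus_dma_region map[] = {
		{ 0x80000000ULL,  0x00000000ULL, 0x40000000ULL },
		{ 0x100000000ULL, 0x40000000ULL, 0x40000000ULL },
		{ 0 },
	};
	unsigned long pfn = PFN_DOWN(0x90000000ULL);
	const struct bus_dma_region *r = find_range(map, pfn);

	printf("pfn 0x%lx %s\n", pfn, r ? "is covered" : "is not covered");
	return 0;
}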