@@ -24,8 +24,6 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
-static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(struct timer_list *t);
 
 static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
 {
@@ -73,60 +71,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
-static bool has_iova_flush_queue(struct iova_domain *iovad)
-{
-	return !!iovad->fq;
-}
-
-static void free_iova_flush_queue(struct iova_domain *iovad)
-{
-	if (!has_iova_flush_queue(iovad))
-		return;
-
-	del_timer_sync(&iovad->fq_timer);
-
-	fq_destroy_all_entries(iovad);
-
-	free_percpu(iovad->fq);
-
-	iovad->fq = NULL;
-	iovad->fq_domain = NULL;
-}
-
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
-{
-	struct iova_fq __percpu *queue;
-	int i, cpu;
-
-	atomic64_set(&iovad->fq_flush_start_cnt, 0);
-	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
-
-	queue = alloc_percpu(struct iova_fq);
-	if (!queue)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq;
-
-		fq = per_cpu_ptr(queue, cpu);
-		fq->head = 0;
-		fq->tail = 0;
-
-		spin_lock_init(&fq->lock);
-
-		for (i = 0; i < IOVA_FQ_SIZE; i++)
-			INIT_LIST_HEAD(&fq->entries[i].freelist);
-	}
-
-	iovad->fq_domain = fq_domain;
-	iovad->fq = queue;
-
-	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
-	atomic_set(&iovad->fq_timer_on, 0);
-
-	return 0;
-}
-
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
 {
@@ -594,23 +538,6 @@ static void iova_domain_flush(struct iova_domain *iovad)
 	atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
-static void fq_destroy_all_entries(struct iova_domain *iovad)
-{
-	int cpu;
-
-	/*
-	 * This code runs when the iova_domain is being detroyed, so don't
-	 * bother to free iovas, just free any remaining pagetable pages.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
-		int idx;
-
-		fq_ring_for_each(idx, fq)
-			put_pages_list(&fq->entries[idx].freelist);
-	}
-}
-
 static void fq_flush_timeout(struct timer_list *t)
 {
 	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
@@ -678,6 +605,64 @@ void queue_iova(struct iova_domain *iovad,
 		  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
 
+static void free_iova_flush_queue(struct iova_domain *iovad)
+{
+	int cpu, idx;
+
+	if (!iovad->fq)
+		return;
+
+	del_timer_sync(&iovad->fq_timer);
+	/*
+	 * This code runs when the iova_domain is being detroyed, so don't
+	 * bother to free iovas, just free any remaining pagetable pages.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
+
+		fq_ring_for_each(idx, fq)
+			put_pages_list(&fq->entries[idx].freelist);
+	}
+
+	free_percpu(iovad->fq);
+
+	iovad->fq = NULL;
+	iovad->fq_domain = NULL;
+}
+
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
+{
+	struct iova_fq __percpu *queue;
+	int i, cpu;
+
+	atomic64_set(&iovad->fq_flush_start_cnt, 0);
+	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
+	queue = alloc_percpu(struct iova_fq);
+	if (!queue)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct iova_fq *fq = per_cpu_ptr(queue, cpu);
+
+		fq->head = 0;
+		fq->tail = 0;
+
+		spin_lock_init(&fq->lock);
+
+		for (i = 0; i < IOVA_FQ_SIZE; i++)
+			INIT_LIST_HEAD(&fq->entries[i].freelist);
+	}
+
+	iovad->fq_domain = fq_domain;
+	iovad->fq = queue;
+
+	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
+	atomic_set(&iovad->fq_timer_on, 0);
+
+	return 0;
+}
+
 /**
  * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
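
For context, a minimal usage sketch of the flush-queue API as it stands after this patch. The example_* wrappers and the surrounding setup are illustrative assumptions, not code from this series; only init_iova_flush_queue(), queue_iova() and free_iova_flush_queue() come from the diff above:

	/*
	 * Illustrative sketch, not part of the patch: the expected call
	 * pattern for the flush-queue API after the move.
	 */
	static int example_enable_deferred_flush(struct iova_domain *iovad,
						 struct iommu_domain *domain)
	{
		int ret;

		/* Allocates the per-CPU queues and sets up the flush timer. */
		ret = init_iova_flush_queue(iovad, domain);
		if (ret)
			return ret;	/* -ENOMEM when alloc_percpu() fails */

		/* Unmap paths may now defer IOVA frees via queue_iova(). */
		return 0;
	}

	static void example_teardown(struct iova_domain *iovad)
	{
		/*
		 * Safe even if the queue was never set up: the consolidated
		 * free_iova_flush_queue() returns early when iovad->fq is
		 * NULL, and otherwise stops the timer and frees any pages
		 * still queued for flushing before releasing the queues.
		 */
		free_iova_flush_queue(iovad);
	}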