Skip to content

Commit ea4d71b

Browse files
rmurphy-arm authored and joergroedel committed
iommu/iova: Consolidate flush queue code
Squash and simplify some of the freeing code, and move the init and free routines down into the rest of the flush queue code to obviate the forward declarations. Reviewed-by: John Garry <[email protected]> Signed-off-by: Robin Murphy <[email protected]> Link: https://lore.kernel.org/r/b0dd4565e6646b6489599d7a1eaa362c75f53c95.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel <[email protected]>
1 parent 87f60cc commit ea4d71b

File tree

1 file changed

+58
-73
lines changed

1 file changed

+58
-73
lines changed

drivers/iommu/iova.c

Lines changed: 58 additions & 73 deletions
Original file line number · Diff line number · Diff line change
@@ -24,8 +24,6 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
2424
static void init_iova_rcaches(struct iova_domain *iovad);
2525
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
2626
static void free_iova_rcaches(struct iova_domain *iovad);
27-
static void fq_destroy_all_entries(struct iova_domain *iovad);
28-
static void fq_flush_timeout(struct timer_list *t);
2927

3028
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
3129
{
@@ -73,60 +71,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
7371
}
7472
EXPORT_SYMBOL_GPL(init_iova_domain);
7573

76-
static bool has_iova_flush_queue(struct iova_domain *iovad)
77-
{
78-
return !!iovad->fq;
79-
}
80-
81-
static void free_iova_flush_queue(struct iova_domain *iovad)
82-
{
83-
if (!has_iova_flush_queue(iovad))
84-
return;
85-
86-
del_timer_sync(&iovad->fq_timer);
87-
88-
fq_destroy_all_entries(iovad);
89-
90-
free_percpu(iovad->fq);
91-
92-
iovad->fq = NULL;
93-
iovad->fq_domain = NULL;
94-
}
95-
96-
int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
97-
{
98-
struct iova_fq __percpu *queue;
99-
int i, cpu;
100-
101-
atomic64_set(&iovad->fq_flush_start_cnt, 0);
102-
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
103-
104-
queue = alloc_percpu(struct iova_fq);
105-
if (!queue)
106-
return -ENOMEM;
107-
108-
for_each_possible_cpu(cpu) {
109-
struct iova_fq *fq;
110-
111-
fq = per_cpu_ptr(queue, cpu);
112-
fq->head = 0;
113-
fq->tail = 0;
114-
115-
spin_lock_init(&fq->lock);
116-
117-
for (i = 0; i < IOVA_FQ_SIZE; i++)
118-
INIT_LIST_HEAD(&fq->entries[i].freelist);
119-
}
120-
121-
iovad->fq_domain = fq_domain;
122-
iovad->fq = queue;
123-
124-
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
125-
atomic_set(&iovad->fq_timer_on, 0);
126-
127-
return 0;
128-
}
129-
13074
static struct rb_node *
13175
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
13276
{
@@ -594,23 +538,6 @@ static void iova_domain_flush(struct iova_domain *iovad)
594538
atomic64_inc(&iovad->fq_flush_finish_cnt);
595539
}
596540

597-
static void fq_destroy_all_entries(struct iova_domain *iovad)
598-
{
599-
int cpu;
600-
601-
/*
602-
* This code runs when the iova_domain is being detroyed, so don't
603-
* bother to free iovas, just free any remaining pagetable pages.
604-
*/
605-
for_each_possible_cpu(cpu) {
606-
struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
607-
int idx;
608-
609-
fq_ring_for_each(idx, fq)
610-
put_pages_list(&fq->entries[idx].freelist);
611-
}
612-
}
613-
614541
static void fq_flush_timeout(struct timer_list *t)
615542
{
616543
struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
@@ -678,6 +605,64 @@ void queue_iova(struct iova_domain *iovad,
678605
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
679606
}
680607

608+
static void free_iova_flush_queue(struct iova_domain *iovad)
609+
{
610+
int cpu, idx;
611+
612+
if (!iovad->fq)
613+
return;
614+
615+
del_timer_sync(&iovad->fq_timer);
616+
/*
617+
* This code runs when the iova_domain is being detroyed, so don't
618+
* bother to free iovas, just free any remaining pagetable pages.
619+
*/
620+
for_each_possible_cpu(cpu) {
621+
struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
622+
623+
fq_ring_for_each(idx, fq)
624+
put_pages_list(&fq->entries[idx].freelist);
625+
}
626+
627+
free_percpu(iovad->fq);
628+
629+
iovad->fq = NULL;
630+
iovad->fq_domain = NULL;
631+
}
632+
633+
int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
634+
{
635+
struct iova_fq __percpu *queue;
636+
int i, cpu;
637+
638+
atomic64_set(&iovad->fq_flush_start_cnt, 0);
639+
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
640+
641+
queue = alloc_percpu(struct iova_fq);
642+
if (!queue)
643+
return -ENOMEM;
644+
645+
for_each_possible_cpu(cpu) {
646+
struct iova_fq *fq = per_cpu_ptr(queue, cpu);
647+
648+
fq->head = 0;
649+
fq->tail = 0;
650+
651+
spin_lock_init(&fq->lock);
652+
653+
for (i = 0; i < IOVA_FQ_SIZE; i++)
654+
INIT_LIST_HEAD(&fq->entries[i].freelist);
655+
}
656+
657+
iovad->fq_domain = fq_domain;
658+
iovad->fq = queue;
659+
660+
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
661+
atomic_set(&iovad->fq_timer_on, 0);
662+
663+
return 0;
664+
}
665+
681666
/**
682667
* put_iova_domain - destroys the iova domain
683668
* @iovad: - iova domain in question.

0 commit comments

Comments
 (0)