Skip to content

Commit 3f70356

Browse files
author
Christoph Hellwig
committed
swiotlb: merge swiotlb-xen initialization into swiotlb
Reuse the generic swiotlb initialization for xen-swiotlb. For ARM/ARM64 this works trivially, while for x86 xen_swiotlb_fixup needs to be passed as the remap argument to swiotlb_init_remap/swiotlb_init_late. Note that the lower bound of the swiotlb size is changed to the smaller IO_TLB_MIN_SLABS based value with this patch, but that is fine as the 2MB value used in Xen before was just an optimization and is not the hard lower bound.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Tested-by: Boris Ostrovsky <[email protected]>
1 parent 7374153 commit 3f70356

File tree

6 files changed

+28
-155
lines changed

6 files changed

+28
-155
lines changed

arch/arm/xen/mm.c

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -23,22 +23,20 @@
2323
#include <asm/xen/hypercall.h>
2424
#include <asm/xen/interface.h>
2525

26-
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
26+
static gfp_t xen_swiotlb_gfp(void)
2727
{
2828
phys_addr_t base;
29-
gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
3029
u64 i;
3130

3231
for_each_mem_range(i, &base, NULL) {
3332
if (base < (phys_addr_t)0xffffffff) {
3433
if (IS_ENABLED(CONFIG_ZONE_DMA32))
35-
flags |= __GFP_DMA32;
36-
else
37-
flags |= __GFP_DMA;
38-
break;
34+
return __GFP_DMA32;
35+
return __GFP_DMA;
3936
}
4037
}
41-
return __get_free_pages(flags, order);
38+
39+
return GFP_KERNEL;
4240
}
4341

4442
static bool hypercall_cflush = false;
@@ -140,10 +138,13 @@ static int __init xen_mm_init(void)
140138
if (!xen_swiotlb_detect())
141139
return 0;
142140

143-
rc = xen_swiotlb_init();
144141
/* we can work with the default swiotlb */
145-
if (rc < 0 && rc != -EEXIST)
146-
return rc;
142+
if (!io_tlb_default_mem.nslabs) {
143+
rc = swiotlb_init_late(swiotlb_size_or_default(),
144+
xen_swiotlb_gfp(), NULL);
145+
if (rc < 0)
146+
return rc;
147+
}
147148

148149
cflush.op = 0;
149150
cflush.a.dev_bus_addr = 0;

arch/x86/include/asm/xen/page.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
357357
return false;
358358
}
359359

360-
static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
361-
{
362-
return __get_free_pages(__GFP_NOWARN, order);
363-
}
364-
365360
#endif /* _ASM_X86_XEN_PAGE_H */

arch/x86/kernel/pci-dma.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -72,30 +72,30 @@ static inline void __init pci_swiotlb_detect(void)
7272
#endif /* CONFIG_SWIOTLB */
7373

7474
#ifdef CONFIG_SWIOTLB_XEN
75-
static bool xen_swiotlb;
76-
7775
static void __init pci_xen_swiotlb_init(void)
7876
{
7977
if (!xen_initial_domain() && !x86_swiotlb_enable)
8078
return;
8179
x86_swiotlb_enable = true;
82-
xen_swiotlb = true;
83-
xen_swiotlb_init_early();
80+
x86_swiotlb_flags |= SWIOTLB_ANY;
81+
swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
8482
dma_ops = &xen_swiotlb_dma_ops;
8583
if (IS_ENABLED(CONFIG_PCI))
8684
pci_request_acs();
8785
}
8886

8987
int pci_xen_swiotlb_init_late(void)
9088
{
91-
int rc;
92-
93-
if (xen_swiotlb)
89+
if (dma_ops == &xen_swiotlb_dma_ops)
9490
return 0;
9591

96-
rc = xen_swiotlb_init();
97-
if (rc)
98-
return rc;
92+
/* we can work with the default swiotlb */
93+
if (!io_tlb_default_mem.nslabs) {
94+
int rc = swiotlb_init_late(swiotlb_size_or_default(),
95+
GFP_KERNEL, xen_swiotlb_fixup);
96+
if (rc < 0)
97+
return rc;
98+
}
9999

100100
/* XXX: this switches the dma ops under live devices! */
101101
dma_ops = &xen_swiotlb_dma_ops;

drivers/xen/swiotlb-xen.c

Lines changed: 1 addition & 127 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
104104
return 0;
105105
}
106106

107-
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
107+
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
108108
{
109109
int rc;
110110
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -130,132 +130,6 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
130130
return 0;
131131
}
132132

133-
enum xen_swiotlb_err {
134-
XEN_SWIOTLB_UNKNOWN = 0,
135-
XEN_SWIOTLB_ENOMEM,
136-
XEN_SWIOTLB_EFIXUP
137-
};
138-
139-
static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
140-
{
141-
switch (err) {
142-
case XEN_SWIOTLB_ENOMEM:
143-
return "Cannot allocate Xen-SWIOTLB buffer\n";
144-
case XEN_SWIOTLB_EFIXUP:
145-
return "Failed to get contiguous memory for DMA from Xen!\n"\
146-
"You either: don't have the permissions, do not have"\
147-
" enough free memory under 4GB, or the hypervisor memory"\
148-
" is too fragmented!";
149-
default:
150-
break;
151-
}
152-
return "";
153-
}
154-
155-
int xen_swiotlb_init(void)
156-
{
157-
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
158-
unsigned long bytes = swiotlb_size_or_default();
159-
unsigned long nslabs = bytes >> IO_TLB_SHIFT;
160-
unsigned int order, repeat = 3;
161-
int rc = -ENOMEM;
162-
char *start;
163-
164-
if (io_tlb_default_mem.nslabs) {
165-
pr_warn("swiotlb buffer already initialized\n");
166-
return -EEXIST;
167-
}
168-
169-
retry:
170-
m_ret = XEN_SWIOTLB_ENOMEM;
171-
order = get_order(bytes);
172-
173-
/*
174-
* Get IO TLB memory from any location.
175-
*/
176-
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
177-
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
178-
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
179-
start = (void *)xen_get_swiotlb_free_pages(order);
180-
if (start)
181-
break;
182-
order--;
183-
}
184-
if (!start)
185-
goto exit;
186-
if (order != get_order(bytes)) {
187-
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
188-
(PAGE_SIZE << order) >> 20);
189-
nslabs = SLABS_PER_PAGE << order;
190-
bytes = nslabs << IO_TLB_SHIFT;
191-
}
192-
193-
/*
194-
* And replace that memory with pages under 4GB.
195-
*/
196-
rc = xen_swiotlb_fixup(start, nslabs);
197-
if (rc) {
198-
free_pages((unsigned long)start, order);
199-
m_ret = XEN_SWIOTLB_EFIXUP;
200-
goto error;
201-
}
202-
rc = swiotlb_late_init_with_tbl(start, nslabs);
203-
if (rc)
204-
return rc;
205-
return 0;
206-
error:
207-
if (nslabs > 1024 && repeat--) {
208-
/* Min is 2MB */
209-
nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
210-
bytes = nslabs << IO_TLB_SHIFT;
211-
pr_info("Lowering to %luMB\n", bytes >> 20);
212-
goto retry;
213-
}
214-
exit:
215-
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
216-
return rc;
217-
}
218-
219-
#ifdef CONFIG_X86
220-
void __init xen_swiotlb_init_early(void)
221-
{
222-
unsigned long bytes = swiotlb_size_or_default();
223-
unsigned long nslabs = bytes >> IO_TLB_SHIFT;
224-
unsigned int repeat = 3;
225-
char *start;
226-
int rc;
227-
228-
retry:
229-
/*
230-
* Get IO TLB memory from any location.
231-
*/
232-
start = memblock_alloc(PAGE_ALIGN(bytes),
233-
IO_TLB_SEGSIZE << IO_TLB_SHIFT);
234-
if (!start)
235-
panic("%s: Failed to allocate %lu bytes\n",
236-
__func__, PAGE_ALIGN(bytes));
237-
238-
/*
239-
* And replace that memory with pages under 4GB.
240-
*/
241-
rc = xen_swiotlb_fixup(start, nslabs);
242-
if (rc) {
243-
memblock_free(start, PAGE_ALIGN(bytes));
244-
if (nslabs > 1024 && repeat--) {
245-
/* Min is 2MB */
246-
nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
247-
bytes = nslabs << IO_TLB_SHIFT;
248-
pr_info("Lowering to %luMB\n", bytes >> 20);
249-
goto retry;
250-
}
251-
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
252-
}
253-
254-
if (swiotlb_init_with_tbl(start, nslabs, SWIOTLB_VERBOSE))
255-
panic("Cannot allocate SWIOTLB buffer");
256-
}
257-
#endif /* CONFIG_X86 */
258-
259133
static void *
260134
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
261135
dma_addr_t *dma_handle, gfp_t flags,

include/xen/arm/page.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,5 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
115115
bool xen_arch_need_swiotlb(struct device *dev,
116116
phys_addr_t phys,
117117
dma_addr_t dev_addr);
118-
unsigned long xen_get_swiotlb_free_pages(unsigned int order);
119118

120119
#endif /* _ASM_ARM_XEN_PAGE_H */

include/xen/swiotlb-xen.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,12 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
1010
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
1111
size_t size, enum dma_data_direction dir);
1212

13-
int xen_swiotlb_init(void);
14-
void __init xen_swiotlb_init_early(void);
13+
#ifdef CONFIG_SWIOTLB_XEN
14+
int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
15+
#else
16+
#define xen_swiotlb_fixup NULL
17+
#endif
18+
1519
extern const struct dma_map_ops xen_swiotlb_dma_ops;
1620

1721
#endif /* __LINUX_SWIOTLB_XEN_H */

0 commit comments

Comments (0)