Commit 7374153

Author: Christoph Hellwig

swiotlb: provide swiotlb_init variants that remap the buffer
To share more code between swiotlb and xen-swiotlb, offer a swiotlb_init_remap interface and add a remap callback to swiotlb_init_late that will allow Xen to remap the buffer without duplicating much of the logic.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Tested-by: Boris Ostrovsky <[email protected]>
Parent: 7425195

3 files changed, 38 insertions(+), 5 deletions(-)
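For context, a minimal sketch of how a caller (for example xen-swiotlb, which this series targets) might plug into the new early-boot interface. The function names, the flag choice, and the callback body below are illustrative assumptions, not part of this commit:

#include <linux/swiotlb.h>

/*
 * Illustrative remap callback (assumption, not from this commit): it is
 * handed the freshly allocated buffer and its size in IO TLB slabs, and
 * returns 0 on success or a negative errno to make swiotlb free the
 * buffer, halve nslabs, and retry with a smaller allocation.
 */
static int __init my_swiotlb_remap(void *tlb, unsigned long nslabs)
{
	/* e.g. exchange the pages for machine-contiguous memory under Xen */
	return 0;
}

void __init my_arch_swiotlb_setup(void)
{
	/* early boot: allocate from memblock, then run the remap callback */
	swiotlb_init_remap(true, SWIOTLB_ANY, my_swiotlb_remap);
}

Passing NULL as the callback, as the plain swiotlb_init() wrapper below does, skips the remap step entirely.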

arch/x86/pci/sta2x11-fixup.c (1 addition, 1 deletion)

@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
 		int size = STA2X11_SWIOTLB_SIZE;
 		/* First instance: register your own swiotlb area */
 		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-		if (swiotlb_init_late(size, GFP_DMA))
+		if (swiotlb_init_late(size, GFP_DMA, NULL))
 			dev_emerg(&pdev->dev, "init swiotlb failed\n");
 	}
 	list_add(&instance->list, &sta2x11_instance_list);

include/linux/swiotlb.h (4 additions, 1 deletion)

@@ -36,8 +36,11 @@ struct scatterlist;
 
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+	int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+	int (*remap)(void *tlb, unsigned long nslabs));
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-int swiotlb_init_late(size_t size, gfp_t gfp_mask);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,

kernel/dma/swiotlb.c (33 additions, 3 deletions)

@@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+	unsigned long nslabs = default_nslabs;
+	size_t bytes;
 	void *tlb;
 
 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 	 * allow to pick a location everywhere for hypervisors with guest
 	 * memory encryption.
 	 */
+retry:
+	bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
 	if (flags & SWIOTLB_ANY)
 		tlb = memblock_alloc(bytes, PAGE_SIZE);
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb)
 		goto fail;
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			panic("%s: Failed to remap %zu bytes\n",
+			      __func__, bytes);
+		goto retry;
+	}
 	if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
 		goto fail_free_mem;
 	return;
@@ -287,12 +300,18 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 	pr_warn("Cannot allocate buffer");
 }
 
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+{
+	return swiotlb_init_remap(addressing_limit, flags, NULL);
+}
+
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
 */
-int swiotlb_init_late(size_t size, gfp_t gfp_mask)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 	unsigned long bytes;
@@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 	if (swiotlb_force_disable)
 		return 0;
 
+retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 			(PAGE_SIZE << order) >> 20);
 		nslabs = SLABS_PER_PAGE << order;
 	}
+	if (remap)
+		rc = remap(vstart, nslabs);
+	if (rc) {
+		free_pages((unsigned long)vstart, order);
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			return rc;
+		goto retry;
+	}
 	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);

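The late-init path takes the same callback and mirrors the retry logic: when the remap fails, both paths halve nslabs (kept aligned to IO_TLB_SEGSIZE) and retry, until IO_TLB_MIN_SLABS is reached, at which point the early path panics and the late path returns the error. A minimal sketch of a late caller, analogous to the sta2x11 call site above; the driver function name and the callback body are illustrative assumptions, not from this commit:

#include <linux/swiotlb.h>

/* Illustrative optional remap callback; passing NULL skips remapping. */
static int my_late_remap(void *tlb, unsigned long nslabs)
{
	/* return a negative errno to have swiotlb shrink the buffer and retry */
	return 0;
}

static int my_driver_swiotlb_setup(size_t size)
{
	/* late boot: the buffer comes from the page allocator, not memblock */
	return swiotlb_init_late(size, GFP_DMA, my_late_remap);
}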