
Commit 4a47cba

dma-direct: improve swiotlb error reporting

Author: Christoph Hellwig (author and committer)

Untangle the way dma_direct_map_page calls into swiotlb so that errors where
the swiotlb DMA address overflows the mask can be reported separately from
overflows in the !swiotlb case. This means that swiotlb_map now has to do a
little more work that duplicates dma_direct_map_page, but doing so greatly
simplifies the calling convention.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>

1 parent 91ef26f · commit 4a47cba
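
The API change at the heart of the patch, taken verbatim from the header diff
below: swiotlb_map() drops the bool-plus-output-pointers convention and
instead returns the DMA address directly, using DMA_MAPPING_ERROR as the
in-band failure value.

	/* Old convention: success flag, results passed back through pointers */
	bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
			size_t size, enum dma_data_direction dir, unsigned long attrs);

	/* New convention: looks like any other map routine; errors are in-band */
	dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
			size_t size, enum dma_data_direction dir, unsigned long attrs);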

3 files changed: +33 −36 lines

include/linux/swiotlb.h  (3 additions, 8 deletions)

@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 		size_t size, enum dma_data_direction dir,
 		enum dma_sync_target target);
 
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+
 #ifdef CONFIG_SWIOTLB
 extern enum swiotlb_force swiotlb_force;
 extern phys_addr_t io_tlb_start, io_tlb_end;
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 	return paddr >= io_tlb_start && paddr < io_tlb_end;
 }
 
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return false;
 }
-static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
-		dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	return false;
-}
 static inline void swiotlb_exit(void)
 {
 }
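
Two things happen in this header: the swiotlb_map() prototype moves out of the
CONFIG_SWIOTLB section so it is declared unconditionally, and the
!CONFIG_SWIOTLB inline stub is deleted rather than updated. No stub is needed
because the callers added in kernel/dma/direct.c below only reach
swiotlb_map() behind swiotlb_force checks; assuming the then-current header,
which defines swiotlb_force as the constant SWIOTLB_NO_FORCE when
CONFIG_SWIOTLB is off (that define lives in the surrounding file, not this
diff), those call sites become compile-time dead code and the compiler never
emits a reference. Abridged sketch of the resulting header:

	dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
			size_t size, enum dma_data_direction dir, unsigned long attrs);

	#ifdef CONFIG_SWIOTLB
	extern enum swiotlb_force swiotlb_force;
	/* ... CONFIG_SWIOTLB declarations ... */
	#else
	#define swiotlb_force SWIOTLB_NO_FORCE	/* assumption: from surrounding file */
	/* ... inline no-op stubs ... */
	#endif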

kernel/dma/direct.c  (7 additions, 9 deletions)

@@ -357,22 +357,20 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif
 
-static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
-		size_t size)
-{
-	return swiotlb_force != SWIOTLB_FORCE &&
-			dma_capable(dev, dma_addr, size, true);
-}
-
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
-	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
-	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
+	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+		return swiotlb_map(dev, phys, size, dir, attrs);
+
+	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		if (swiotlb_force != SWIOTLB_NO_FORCE)
+			return swiotlb_map(dev, phys, size, dir, attrs);
+
 		report_addr(dev, dma_addr, size);
 		return DMA_MAPPING_ERROR;
 	}
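
Putting the hunk together, dma_direct_map_page() now distinguishes three cases
that the old dma_direct_possible() helper folded into one condition. A sketch
of the resulting function; the tail past this hunk is elided since it is not
part of the diff:

	dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size, enum dma_data_direction dir,
			unsigned long attrs)
	{
		phys_addr_t phys = page_to_phys(page) + offset;
		dma_addr_t dma_addr = phys_to_dma(dev, phys);

		/* Case 1: bouncing is forced, always go through swiotlb. */
		if (unlikely(swiotlb_force == SWIOTLB_FORCE))
			return swiotlb_map(dev, phys, size, dir, attrs);

		if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
			/* Case 2: address overflows the mask, bounce unless disabled. */
			if (swiotlb_force != SWIOTLB_NO_FORCE)
				return swiotlb_map(dev, phys, size, dir, attrs);

			/* Case 3: plain direct-mapping overflow, reported as such. */
			report_addr(dev, dma_addr, size);
			return DMA_MAPPING_ERROR;
		}

		/* ... unchanged remainder of the function, outside this hunk ... */
	}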

kernel/dma/swiotlb.c  (23 additions, 19 deletions)

@@ -22,6 +22,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -656,35 +657,38 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 }
 
 /*
- * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
  * to the device copy the data into it as well.
  */
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
+	phys_addr_t swiotlb_addr;
+	dma_addr_t dma_addr;
 
-	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
-		dev_warn_ratelimited(dev,
-			"Cannot do DMA to address %pa\n", phys);
-		return false;
-	}
+	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
+			swiotlb_force);
 
-	/* Oh well, have to allocate and map a bounce buffer. */
-	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, size, dir, attrs);
-	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
-		return false;
+	swiotlb_addr = swiotlb_tbl_map_single(dev,
+			__phys_to_dma(dev, io_tlb_start),
+			paddr, size, size, dir, attrs);
+	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
+		return DMA_MAPPING_ERROR;
 
 	/* Ensure that the address returned is DMA'ble */
-	*dma_addr = __phys_to_dma(dev, *phys);
-	if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+	dma_addr = __phys_to_dma(dev, swiotlb_addr);
+	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
-		return false;
+		dev_WARN_ONCE(dev, 1,
+			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+		return DMA_MAPPING_ERROR;
 	}
 
-	return true;
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(swiotlb_addr, size, dir);
+	return dma_addr;
 }
 
 size_t swiotlb_max_mapping_size(struct device *dev)
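
The separate error reporting promised by the commit message lands here: a
bounce buffer whose own address still overflows the device mask now triggers
a dedicated dev_WARN_ONCE() naming the DMA mask and bus limit, instead of the
generic failure the old bool return collapsed everything into. swiotlb_map()
also takes over the arch_sync_dma_for_device() call for non-coherent devices,
which is the extra duplicated work the commit message mentions. A hypothetical
caller's view, for illustration only (dma_map_single() and dma_mapping_error()
are the standard DMA API entry points that reach dma_direct_map_page() on
direct-mapped systems):

	/* Illustrative only: both overflow flavors now surface uniformly
	 * as DMA_MAPPING_ERROR, with the cause logged by the kernel. */
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;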
