
Commit 0a44cac

Merge tag 'dma-mapping-5.6' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - give command line cma= precedence over the CONFIG_ option (Nicolas
   Saenz Julienne)

 - always allow 32-bit DMA, even for weirdly placed ZONE_DMA

 - improve the debug printks when memory is not addressable, to help
   find problems with swiotlb initialization

* tag 'dma-mapping-5.6' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: improve DMA mask overflow reporting
  dma-direct: improve swiotlb error reporting
  dma-direct: relax addressability checks in dma_direct_supported
  dma-contiguous: CMA: give precedence to cmdline
2 parents 2b72104 + 75467ee commit 0a44cac
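With this series, a cma= value on the kernel command line takes precedence over a firmware-provided default CMA region (a devicetree linux,cma-default reserved-memory node) as well as the built-in CONFIG_ default. Illustrative usage, following the cma=nn[MG]@[start[MG][-end[MG]]] parameter syntax; the sizes below are example values, not taken from this commit:

    cma=64M         reserve a 64 MiB default CMA area
    cma=64M@0-4G    same, but constrain its placement to below 4 GiB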

File tree

4 files changed: +59 -64 lines

  include/linux/swiotlb.h
  kernel/dma/contiguous.c
  kernel/dma/direct.c
  kernel/dma/swiotlb.c

include/linux/swiotlb.h  (3 additions, 8 deletions)

@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
                 size_t size, enum dma_data_direction dir,
                 enum dma_sync_target target);

+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+        size_t size, enum dma_data_direction dir, unsigned long attrs);
+
 #ifdef CONFIG_SWIOTLB
 extern enum swiotlb_force swiotlb_force;
 extern phys_addr_t io_tlb_start, io_tlb_end;
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
         return paddr >= io_tlb_start && paddr < io_tlb_end;
 }

-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-        size_t size, enum dma_data_direction dir, unsigned long attrs);
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
         return false;
 }
-static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
-        dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
-        unsigned long attrs)
-{
-        return false;
-}
 static inline void swiotlb_exit(void)
 {
 }
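The old swiotlb_map() filled *phys and *dma_addr and returned a bool; the new declaration returns the bounce-buffer DMA address (or DMA_MAPPING_ERROR) directly and is visible regardless of CONFIG_SWIOTLB. A minimal caller sketch under that assumption; map_one_page() is a hypothetical helper for illustration, not part of the patch:

    /* Hypothetical caller of the new API: swiotlb_map() now hands back the
     * DMA address directly instead of updating pointer arguments. */
    static dma_addr_t map_one_page(struct device *dev, struct page *page,
                                   size_t size, enum dma_data_direction dir)
    {
            phys_addr_t phys = page_to_phys(page);

            /* DMA_MAPPING_ERROR on failure, otherwise the bounce address. */
            return swiotlb_map(dev, phys, size, dir, 0);
    }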

kernel/dma/contiguous.c  (8 additions, 1 deletion)

@@ -302,9 +302,16 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
         phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
         phys_addr_t mask = align - 1;
         unsigned long node = rmem->fdt_node;
+        bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
         struct cma *cma;
         int err;

+        if (size_cmdline != -1 && default_cma) {
+                pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
+                        rmem->name);
+                return -EBUSY;
+        }
+
         if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
             of_get_flat_dt_prop(node, "no-map", NULL))
                 return -EINVAL;
@@ -322,7 +329,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
         /* Architecture specific contiguous memory fixup. */
         dma_contiguous_early_fixup(rmem->base, rmem->size);

-        if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+        if (default_cma)
                 dma_contiguous_set_default(cma);

         rmem->ops = &rmem_cma_ops;
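For context, size_cmdline is the size parsed from the cma= boot parameter elsewhere in kernel/dma/contiguous.c; it stays at -1 when no cma= was given, which is what the new bypass check relies on. A simplified paraphrase of that parser (base/limit handling omitted; not part of this diff):

    /* Paraphrased: the cma= early parameter records the requested size;
     * -1 means "not specified on the command line". */
    static phys_addr_t size_cmdline = -1;

    static int __init early_cma(char *p)
    {
            if (!p)
                    return -EINVAL;
            size_cmdline = memparse(p, &p);   /* e.g. "cma=64M" */
            return 0;
    }
    early_param("cma", early_cma);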

kernel/dma/direct.c  (25 additions, 36 deletions)

@@ -23,18 +23,6 @@
  */
 unsigned int zone_dma_bits __ro_after_init = 24;

-static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-        if (!dev->dma_mask) {
-                dev_err_once(dev, "DMA map on device without dma_mask\n");
-        } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
-                dev_err_once(dev,
-                        "overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
-                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-        }
-        WARN_ON_ONCE(1);
-}
-
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                 phys_addr_t phys)
 {
@@ -357,23 +345,23 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif

-static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
-                size_t size)
-{
-        return swiotlb_force != SWIOTLB_FORCE &&
-                dma_capable(dev, dma_addr, size, true);
-}
-
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir,
                 unsigned long attrs)
 {
         phys_addr_t phys = page_to_phys(page) + offset;
         dma_addr_t dma_addr = phys_to_dma(dev, phys);

-        if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
-            !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
-                report_addr(dev, dma_addr, size);
+        if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+                return swiotlb_map(dev, phys, size, dir, attrs);
+
+        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+                if (swiotlb_force != SWIOTLB_NO_FORCE)
+                        return swiotlb_map(dev, phys, size, dir, attrs);
+
+                dev_WARN_ONCE(dev, 1,
+                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                 return DMA_MAPPING_ERROR;
         }

@@ -411,7 +399,10 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
         dma_addr_t dma_addr = paddr;

         if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-                report_addr(dev, dma_addr, size);
+                dev_err_once(dev,
+                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+                WARN_ON_ONCE(1);
                 return DMA_MAPPING_ERROR;
         }

@@ -472,28 +463,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 }
 #endif /* CONFIG_MMU */

-/*
- * Because 32-bit DMA masks are so common we expect every architecture to be
- * able to satisfy them - either by not supporting more physical memory, or by
- * providing a ZONE_DMA32. If neither is the case, the architecture needs to
- * use an IOMMU instead of the direct mapping.
- */
 int dma_direct_supported(struct device *dev, u64 mask)
 {
-        u64 min_mask;
+        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

-        if (IS_ENABLED(CONFIG_ZONE_DMA))
-                min_mask = DMA_BIT_MASK(zone_dma_bits);
-        else
-                min_mask = DMA_BIT_MASK(32);
-
-        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
+        /*
+         * Because 32-bit DMA masks are so common we expect every architecture
+         * to be able to satisfy them - either by not supporting more physical
+         * memory, or by providing a ZONE_DMA32. If neither is the case, the
+         * architecture needs to use an IOMMU instead of the direct mapping.
+         */
+        if (mask >= DMA_BIT_MASK(32))
+                return 1;

         /*
          * This check needs to be against the actual bit mask value, so
          * use __phys_to_dma() here so that the SME encryption mask isn't
          * part of the check.
          */
+        if (IS_ENABLED(CONFIG_ZONE_DMA))
+                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
         return mask >= __phys_to_dma(dev, min_mask);
 }
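For reference, the addressability test that decides between the fast path, the swiotlb bounce, and the new warning is dma_capable(). A simplified paraphrase follows; the real helper lives in include/linux/dma-direct.h and also handles a low-memory corner case, and the name dma_capable_sketch marks this as illustrative rather than the kernel function:

    /* Simplified: the mapping is usable only if the whole range fits under
     * both the device DMA mask and any platform bus_dma_limit. */
    static inline bool dma_capable_sketch(struct device *dev, dma_addr_t addr,
                                          size_t size)
    {
            dma_addr_t end = addr + size - 1;

            if (!dev->dma_mask)
                    return false;
            return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
    }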

kernel/dma/swiotlb.c  (23 additions, 19 deletions)

@@ -22,6 +22,7 @@

 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -656,35 +657,38 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 }

 /*
- * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
  * to the device copy the data into it as well.
  */
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-        size_t size, enum dma_data_direction dir, unsigned long attrs)
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir, unsigned long attrs)
 {
-        trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
+        phys_addr_t swiotlb_addr;
+        dma_addr_t dma_addr;

-        if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
-                dev_warn_ratelimited(dev,
-                        "Cannot do DMA to address %pa\n", phys);
-                return false;
-        }
+        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
+                              swiotlb_force);

-        /* Oh well, have to allocate and map a bounce buffer. */
-        *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-                        *phys, size, size, dir, attrs);
-        if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
-                return false;
+        swiotlb_addr = swiotlb_tbl_map_single(dev,
+                        __phys_to_dma(dev, io_tlb_start),
+                        paddr, size, size, dir, attrs);
+        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
+                return DMA_MAPPING_ERROR;

         /* Ensure that the address returned is DMA'ble */
-        *dma_addr = __phys_to_dma(dev, *phys);
-        if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-                swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+        dma_addr = __phys_to_dma(dev, swiotlb_addr);
+        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
                         attrs | DMA_ATTR_SKIP_CPU_SYNC);
-                return false;
+                dev_WARN_ONCE(dev, 1,
+                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+                return DMA_MAPPING_ERROR;
         }

-        return true;
+        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                arch_sync_dma_for_device(swiotlb_addr, size, dir);
+        return dma_addr;
 }

 size_t swiotlb_max_mapping_size(struct device *dev)
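Because swiotlb_map() can now return straight out of dma_direct_map_page(), it has to perform the cache maintenance for non-coherent devices itself, hence the new dma-noncoherent.h include and the arch_sync_dma_for_device() call on the bounce buffer. The non-bounce path keeps doing the equivalent in dma_direct_map_page(); roughly, its tail looks like this (paraphrased for context, the wrapper name is illustrative and not part of the diff):

    /* Paraphrase of the non-bounce tail of dma_direct_map_page(): the
     * original buffer is synced, since no swiotlb copy is involved. */
    static dma_addr_t direct_map_tail_sketch(struct device *dev, phys_addr_t phys,
                    dma_addr_t dma_addr, size_t size,
                    enum dma_data_direction dir, unsigned long attrs)
    {
            if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_dma_for_device(phys, size, dir);
            return dma_addr;
    }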
