@@ -22,6 +22,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -656,35 +657,38 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 }
 
 /*
- * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
  * to the device copy the data into it as well.
  */
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-                size_t size, enum dma_data_direction dir, unsigned long attrs)
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir, unsigned long attrs)
 {
-        trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
+        phys_addr_t swiotlb_addr;
+        dma_addr_t dma_addr;
 
-        if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
-                dev_warn_ratelimited(dev,
-                        "Cannot do DMA to address %pa\n", phys);
-                return false;
-        }
+        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
+                        swiotlb_force);
 
-        /* Oh well, have to allocate and map a bounce buffer. */
-        *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-                        *phys, size, size, dir, attrs);
-        if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
-                return false;
+        swiotlb_addr = swiotlb_tbl_map_single(dev,
+                        __phys_to_dma(dev, io_tlb_start),
+                        paddr, size, size, dir, attrs);
+        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
+                return DMA_MAPPING_ERROR;
 
         /* Ensure that the address returned is DMA'ble */
-        *dma_addr = __phys_to_dma(dev, *phys);
-        if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-                swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+        dma_addr = __phys_to_dma(dev, swiotlb_addr);
+        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
                         attrs | DMA_ATTR_SKIP_CPU_SYNC);
-                return false;
+                dev_WARN_ONCE(dev, 1,
+                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+                return DMA_MAPPING_ERROR;
         }
 
-        return true;
+        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                arch_sync_dma_for_device(swiotlb_addr, size, dir);
+        return dma_addr;
 }
 
 size_t swiotlb_max_mapping_size(struct device *dev)
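With the new calling convention, a caller no longer passes phys/dma_addr by reference and tests a bool; it consumes the returned dma_addr_t and compares it against DMA_MAPPING_ERROR, and the SWIOTLB_NO_FORCE policy check dropped from swiotlb_map() above is left to whatever decides to bounce in the first place. Below is a minimal sketch of such a caller, loosely modeled on the dma-direct page-mapping path; the function name example_map_page() and the surrounding error handling are illustrative assumptions, not code from this commit.

/*
 * Sketch only: a dma-direct style caller of the new swiotlb_map().
 * example_map_page() is a hypothetical name; the real map path in
 * kernel/dma/direct.c may differ in detail.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/swiotlb.h>

static dma_addr_t example_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        /* Bouncing is forced: swiotlb_map() now returns the final handle. */
        if (unlikely(swiotlb_force == SWIOTLB_FORCE))
                return swiotlb_map(dev, phys, size, dir, attrs);

        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                /*
                 * The device cannot reach the buffer directly; bounce
                 * unless swiotlb was explicitly disabled (assumed here to
                 * be the caller's job, since swiotlb_map() no longer
                 * checks SWIOTLB_NO_FORCE itself).
                 */
                if (swiotlb_force != SWIOTLB_NO_FORCE)
                        return swiotlb_map(dev, phys, size, dir, attrs);
                return DMA_MAPPING_ERROR;
        }

        /* Directly mappable: only cache maintenance may be needed. */
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(phys, size, dir);
        return dma_addr;
}

Returning a dma_addr_t (with DMA_MAPPING_ERROR as the failure value) also lets swiotlb_map() do the non-coherent arch_sync_dma_for_device() step on the bounce buffer itself, keeping the cache maintenance next to the address it applies to.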