Skip to content

Commit 2ed90db

Browse files
committed
Merge tag 'dma-mapping-5.9' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - make support for dma_ops optional
 - move more code out of line
 - add generic support for a dma_ops bypass mode
 - misc cleanups

* tag 'dma-mapping-5.9' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: cleanup dma_alloc_contiguous
  dma-debug: use named initializers for dir2name
  powerpc: use the generic dma_ops_bypass mode
  dma-mapping: add a dma_ops_bypass flag to struct device
  dma-mapping: make support for dma ops optional
  dma-mapping: inline the fast path dma-direct calls
  dma-mapping: move the remaining DMA API calls out of line
2 parents 9fa867d + 274b3f7 commit 2ed90db

File tree

26 files changed

+415
-416
lines changed

26 files changed

+415
-416
lines changed

arch/alpha/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ config ALPHA
77
select ARCH_NO_PREEMPT
88
select ARCH_NO_SG_CHAIN
99
select ARCH_USE_CMPXCHG_LOCKREF
10+
select DMA_OPS if PCI
1011
select FORCE_PCI if !ALPHA_JENSEN
1112
select PCI_DOMAINS if PCI
1213
select PCI_SYSCALL if PCI

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ config ARM
4141
select CPU_PM if SUSPEND || CPU_IDLE
4242
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
4343
select DMA_DECLARE_COHERENT
44+
select DMA_OPS
4445
select DMA_REMAP if MMU
4546
select EDAC_SUPPORT
4647
select EDAC_ATOMIC_SCRUB

arch/ia64/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,7 @@ config IA64_SGI_UV
192192

193193
config IA64_HP_SBA_IOMMU
194194
bool "HP SBA IOMMU support"
195+
select DMA_OPS
195196
default y
196197
help
197198
Say Y here to add support for the SBA IOMMU found on HP zx1 and

arch/mips/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -366,6 +366,7 @@ config MACH_JAZZ
366366
select ARC_PROMLIB
367367
select ARCH_MIGHT_HAVE_PC_PARPORT
368368
select ARCH_MIGHT_HAVE_PC_SERIO
369+
select DMA_OPS
369370
select FW_ARC
370371
select FW_ARC32
371372
select ARCH_MAY_HAVE_PC_FDC

arch/parisc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ config PARISC
1414
select ARCH_HAS_UBSAN_SANITIZE_ALL
1515
select ARCH_NO_SG_CHAIN
1616
select ARCH_SUPPORTS_MEMORY_FAILURE
17+
select DMA_OPS
1718
select RTC_CLASS
1819
select RTC_DRV_GENERIC
1920
select INIT_ALL_POSSIBLE

arch/powerpc/Kconfig

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,8 @@ config PPC
151151
select BUILDTIME_TABLE_SORT
152152
select CLONE_BACKWARDS
153153
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
154+
select DMA_OPS if PPC64
155+
select DMA_OPS_BYPASS if PPC64
154156
select DYNAMIC_FTRACE if FUNCTION_TRACER
155157
select EDAC_ATOMIC_SCRUB
156158
select EDAC_SUPPORT

arch/powerpc/include/asm/device.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,6 @@ struct iommu_table;
1818
* drivers/macintosh/macio_asic.c
1919
*/
2020
struct dev_archdata {
21-
/*
22-
* Set to %true if the dma_iommu_ops are requested to use a direct
23-
* window instead of dynamically mapping memory.
24-
*/
25-
bool iommu_bypass : 1;
2621
/*
2722
* These two used to be a union. However, with the hybrid ops we need
2823
* both so here we store both a DMA offset for direct mappings and

arch/powerpc/kernel/dma-iommu.c

Lines changed: 9 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -14,23 +14,6 @@
1414
* Generic iommu implementation
1515
*/
1616

17-
/*
18-
* The coherent mask may be smaller than the real mask, check if we can
19-
* really use a direct window.
20-
*/
21-
static inline bool dma_iommu_alloc_bypass(struct device *dev)
22-
{
23-
return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
24-
dma_direct_supported(dev, dev->coherent_dma_mask);
25-
}
26-
27-
static inline bool dma_iommu_map_bypass(struct device *dev,
28-
unsigned long attrs)
29-
{
30-
return dev->archdata.iommu_bypass &&
31-
(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
32-
}
33-
3417
/* Allocates a contiguous real buffer and creates mappings over it.
3518
* Returns the virtual address of the buffer and sets dma_handle
3619
* to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
3922
dma_addr_t *dma_handle, gfp_t flag,
4023
unsigned long attrs)
4124
{
42-
if (dma_iommu_alloc_bypass(dev))
43-
return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
4425
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
4526
dma_handle, dev->coherent_dma_mask, flag,
4627
dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
5031
void *vaddr, dma_addr_t dma_handle,
5132
unsigned long attrs)
5233
{
53-
if (dma_iommu_alloc_bypass(dev))
54-
dma_direct_free(dev, size, vaddr, dma_handle, attrs);
55-
else
56-
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
57-
dma_handle);
34+
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
5835
}
5936

6037
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
6744
enum dma_data_direction direction,
6845
unsigned long attrs)
6946
{
70-
if (dma_iommu_map_bypass(dev, attrs))
71-
return dma_direct_map_page(dev, page, offset, size, direction,
72-
attrs);
7347
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
7448
size, dma_get_mask(dev), direction, attrs);
7549
}
@@ -79,20 +53,15 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
7953
size_t size, enum dma_data_direction direction,
8054
unsigned long attrs)
8155
{
82-
if (!dma_iommu_map_bypass(dev, attrs))
83-
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
84-
direction, attrs);
85-
else
86-
dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
56+
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
57+
attrs);
8758
}
8859

8960

9061
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
9162
int nelems, enum dma_data_direction direction,
9263
unsigned long attrs)
9364
{
94-
if (dma_iommu_map_bypass(dev, attrs))
95-
return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
9665
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
9766
dma_get_mask(dev), direction, attrs);
9867
}
@@ -101,20 +70,18 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
10170
int nelems, enum dma_data_direction direction,
10271
unsigned long attrs)
10372
{
104-
if (!dma_iommu_map_bypass(dev, attrs))
105-
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
73+
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
10674
direction, attrs);
107-
else
108-
dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
10975
}
11076

11177
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
11278
{
11379
struct pci_dev *pdev = to_pci_dev(dev);
11480
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
11581

116-
return phb->controller_ops.iommu_bypass_supported &&
117-
phb->controller_ops.iommu_bypass_supported(pdev, mask);
82+
if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
83+
return false;
84+
return phb->controller_ops.iommu_bypass_supported(pdev, mask);
11885
}
11986

12087
/* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
12390
struct iommu_table *tbl = get_iommu_table_base(dev);
12491

12592
if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
126-
dev->archdata.iommu_bypass = true;
93+
dev->dma_ops_bypass = true;
12794
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
12895
return 1;
12996
}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
141108
}
142109

143110
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
144-
dev->archdata.iommu_bypass = false;
111+
dev->dma_ops_bypass = false;
145112
return 1;
146113
}
147114

@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
153120
if (!tbl)
154121
return 0;
155122

156-
if (dev_is_pci(dev)) {
157-
u64 bypass_mask = dma_direct_get_required_mask(dev);
158-
159-
if (dma_iommu_bypass_supported(dev, bypass_mask))
160-
return bypass_mask;
161-
}
162-
163123
mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
164124
mask += mask - 1;
165125

166126
return mask;
167127
}
168128

169-
static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
170-
size_t size, enum dma_data_direction dir)
171-
{
172-
if (dma_iommu_alloc_bypass(dev))
173-
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
174-
}
175-
176-
static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
177-
size_t sz, enum dma_data_direction dir)
178-
{
179-
if (dma_iommu_alloc_bypass(dev))
180-
dma_direct_sync_single_for_device(dev, addr, sz, dir);
181-
}
182-
183-
extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
184-
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
185-
{
186-
if (dma_iommu_alloc_bypass(dev))
187-
dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
188-
}
189-
190-
extern void dma_iommu_sync_sg_for_device(struct device *dev,
191-
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
192-
{
193-
if (dma_iommu_alloc_bypass(dev))
194-
dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
195-
}
196-
197129
const struct dma_map_ops dma_iommu_ops = {
198130
.alloc = dma_iommu_alloc_coherent,
199131
.free = dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
203135
.map_page = dma_iommu_map_page,
204136
.unmap_page = dma_iommu_unmap_page,
205137
.get_required_mask = dma_iommu_get_required_mask,
206-
.sync_single_for_cpu = dma_iommu_sync_for_cpu,
207-
.sync_single_for_device = dma_iommu_sync_for_device,
208-
.sync_sg_for_cpu = dma_iommu_sync_sg_for_cpu,
209-
.sync_sg_for_device = dma_iommu_sync_sg_for_device,
210138
.mmap = dma_common_mmap,
211139
.get_sgtable = dma_common_get_sgtable,
212140
};

arch/s390/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ config S390
112112
select ARCH_WANT_IPC_PARSE_VERSION
113113
select BUILDTIME_TABLE_SORT
114114
select CLONE_BACKWARDS2
115+
select DMA_OPS if PCI
115116
select DYNAMIC_FTRACE if FUNCTION_TRACER
116117
select GENERIC_CLOCKEVENTS
117118
select GENERIC_CPU_AUTOPROBE

arch/sparc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ config SPARC
1515
default y
1616
select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
1717
select ARCH_MIGHT_HAVE_PC_SERIO
18+
select DMA_OPS
1819
select OF
1920
select OF_PROMTREE
2021
select HAVE_ASM_MODVERSIONS

0 commit comments

Comments
 (0)