Skip to content

Commit 009fbfc

Browse files
committed
Merge tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig: - get rid of the fake support for coherent DMA allocation on coldfire with caches (Christoph Hellwig) - add a few Kconfig dependencies so that Kconfig catches the use of invalid configurations (Christoph Hellwig) - fix a typo in dma-debug output (Chuck Lever) - rewrite a comment in swiotlb (Sean Christopherson) * tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping: dma-debug: Fix a typo in a debugging eye-catcher swiotlb: rewrite comment explaining why the source is preserved on DMA_FROM_DEVICE m68k: remove unused includes from dma.c m68k: don't provide arch_dma_alloc for nommu/coldfire net: fec: use dma_alloc_noncoherent for data cache enabled coldfire m68k: use the coherent DMA code for coldfire without data cache dma-direct: warn when coherent allocations aren't supported dma-direct: simplify the use atomic pool logic in dma_direct_alloc dma-direct: add a CONFIG_ARCH_HAS_DMA_ALLOC symbol dma-direct: add dependencies to CONFIG_DMA_GLOBAL_POOL
2 parents 3c86a44 + 36d91e8 commit 009fbfc

File tree

11 files changed

+127
-77
lines changed

11 files changed

+127
-77
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ config ARM
88
select ARCH_HAS_CPU_FINALIZE_INIT if MMU
99
select ARCH_HAS_CURRENT_STACK_POINTER
1010
select ARCH_HAS_DEBUG_VIRTUAL if MMU
11+
select ARCH_HAS_DMA_ALLOC if MMU
1112
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
1213
select ARCH_HAS_ELF_RANDOMIZE
1314
select ARCH_HAS_FORTIFY_SOURCE

arch/m68k/Kconfig

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,15 @@ config M68K
66
select ARCH_HAS_BINFMT_FLAT
77
select ARCH_HAS_CPU_FINALIZE_INIT if MMU
88
select ARCH_HAS_CURRENT_STACK_POINTER
9-
select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
10-
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
9+
select ARCH_HAS_DMA_PREP_COHERENT if M68K_NONCOHERENT_DMA && !COLDFIRE
10+
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if M68K_NONCOHERENT_DMA
1111
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
1212
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
1313
select ARCH_NO_PREEMPT if !COLDFIRE
1414
select ARCH_USE_MEMTEST if MMU_MOTOROLA
1515
select ARCH_WANT_IPC_PARSE_VERSION
1616
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
17-
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
17+
select DMA_DIRECT_REMAP if M68K_NONCOHERENT_DMA && !COLDFIRE
1818
select GENERIC_ATOMIC64
1919
select GENERIC_CPU_DEVICES
2020
select GENERIC_IOMAP

arch/m68k/Kconfig.cpu

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -535,3 +535,15 @@ config CACHE_COPYBACK
535535
The ColdFire CPU cache is set into Copy-back mode.
536536
endchoice
537537
endif # HAVE_CACHE_CB
538+
539+
# Coldfire cores that do not have a data cache configured can do coherent DMA.
540+
config COLDFIRE_COHERENT_DMA
541+
bool
542+
default y
543+
depends on COLDFIRE
544+
depends on !HAVE_CACHE_CB && !CACHE_D && !CACHE_BOTH
545+
546+
config M68K_NONCOHERENT_DMA
547+
bool
548+
default y
549+
depends on HAS_DMA && !COLDFIRE_COHERENT_DMA

arch/m68k/kernel/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
2323
obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
2424
obj-$(CONFIG_PCI) += pcibios.o
2525

26-
obj-$(CONFIG_HAS_DMA) += dma.o
26+
obj-$(CONFIG_M68K_NONCOHERENT_DMA) += dma.o
2727

2828
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
2929
obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o

arch/m68k/kernel/dma.c

Lines changed: 1 addition & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -4,20 +4,11 @@
44
* for more details.
55
*/
66

7-
#undef DEBUG
8-
97
#include <linux/dma-map-ops.h>
10-
#include <linux/device.h>
118
#include <linux/kernel.h>
12-
#include <linux/platform_device.h>
13-
#include <linux/scatterlist.h>
14-
#include <linux/slab.h>
15-
#include <linux/vmalloc.h>
16-
#include <linux/export.h>
17-
189
#include <asm/cacheflush.h>
1910

20-
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
11+
#ifndef CONFIG_COLDFIRE
2112
void arch_dma_prep_coherent(struct page *page, size_t size)
2213
{
2314
cache_push(page_to_phys(page), size);
@@ -33,29 +24,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot)
3324
}
3425
return prot;
3526
}
36-
#else
37-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
38-
gfp_t gfp, unsigned long attrs)
39-
{
40-
void *ret;
41-
42-
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
43-
gfp |= GFP_DMA;
44-
ret = (void *)__get_free_pages(gfp, get_order(size));
45-
46-
if (ret != NULL) {
47-
memset(ret, 0, size);
48-
*dma_handle = virt_to_phys(ret);
49-
}
50-
return ret;
51-
}
52-
53-
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
54-
dma_addr_t dma_handle, unsigned long attrs)
55-
{
56-
free_pages((unsigned long)vaddr, get_order(size));
57-
}
58-
5927
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
6028

6129
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,

arch/parisc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ config PARISC
88
select HAVE_FUNCTION_GRAPH_TRACER
99
select HAVE_SYSCALL_TRACEPOINTS
1010
select ARCH_WANT_FRAME_POINTERS
11+
select ARCH_HAS_DMA_ALLOC if PA11
1112
select ARCH_HAS_ELF_RANDOMIZE
1213
select ARCH_HAS_STRICT_KERNEL_RWX
1314
select ARCH_HAS_STRICT_MODULE_RWX

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 76 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -363,6 +363,70 @@ static void fec_dump(struct net_device *ndev)
363363
} while (bdp != txq->bd.base);
364364
}
365365

366+
/*
367+
* Coldfire does not support DMA coherent allocations, and has historically used
368+
* a band-aid with a manual flush in fec_enet_rx_queue.
369+
*/
370+
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
371+
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
372+
gfp_t gfp)
373+
{
374+
return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
375+
}
376+
377+
static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
378+
dma_addr_t handle)
379+
{
380+
dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
381+
}
382+
#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
383+
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
384+
gfp_t gfp)
385+
{
386+
return dma_alloc_coherent(dev, size, handle, gfp);
387+
}
388+
389+
static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
390+
dma_addr_t handle)
391+
{
392+
dma_free_coherent(dev, size, cpu_addr, handle);
393+
}
394+
#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
395+
396+
struct fec_dma_devres {
397+
size_t size;
398+
void *vaddr;
399+
dma_addr_t dma_handle;
400+
};
401+
402+
static void fec_dmam_release(struct device *dev, void *res)
403+
{
404+
struct fec_dma_devres *this = res;
405+
406+
fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
407+
}
408+
409+
static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
410+
gfp_t gfp)
411+
{
412+
struct fec_dma_devres *dr;
413+
void *vaddr;
414+
415+
dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
416+
if (!dr)
417+
return NULL;
418+
vaddr = fec_dma_alloc(dev, size, handle, gfp);
419+
if (!vaddr) {
420+
devres_free(dr);
421+
return NULL;
422+
}
423+
dr->vaddr = vaddr;
424+
dr->dma_handle = *handle;
425+
dr->size = size;
426+
devres_add(dev, dr);
427+
return vaddr;
428+
}
429+
366430
static inline bool is_ipv4_pkt(struct sk_buff *skb)
367431
{
368432
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
@@ -1617,7 +1681,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
16171681
}
16181682
#endif
16191683

1620-
#ifdef CONFIG_M532x
1684+
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
1685+
/*
1686+
* Hacky flush of all caches instead of using the DMA API for the TSO
1687+
* headers.
1688+
*/
16211689
flush_cache_all();
16221690
#endif
16231691
rxq = fep->rx_queue[queue_id];
@@ -3243,10 +3311,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
32433311
for (i = 0; i < fep->num_tx_queues; i++)
32443312
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
32453313
txq = fep->tx_queue[i];
3246-
dma_free_coherent(&fep->pdev->dev,
3247-
txq->bd.ring_size * TSO_HEADER_SIZE,
3248-
txq->tso_hdrs,
3249-
txq->tso_hdrs_dma);
3314+
fec_dma_free(&fep->pdev->dev,
3315+
txq->bd.ring_size * TSO_HEADER_SIZE,
3316+
txq->tso_hdrs, txq->tso_hdrs_dma);
32503317
}
32513318

32523319
for (i = 0; i < fep->num_rx_queues; i++)
@@ -3276,10 +3343,9 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
32763343
txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
32773344
txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
32783345

3279-
txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
3346+
txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
32803347
txq->bd.ring_size * TSO_HEADER_SIZE,
3281-
&txq->tso_hdrs_dma,
3282-
GFP_KERNEL);
3348+
&txq->tso_hdrs_dma, GFP_KERNEL);
32833349
if (!txq->tso_hdrs) {
32843350
ret = -ENOMEM;
32853351
goto alloc_failed;
@@ -3998,8 +4064,8 @@ static int fec_enet_init(struct net_device *ndev)
39984064
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
39994065

40004066
/* Allocate memory for buffer descriptors. */
4001-
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
4002-
GFP_KERNEL);
4067+
cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4068+
GFP_KERNEL);
40034069
if (!cbd_base) {
40044070
ret = -ENOMEM;
40054071
goto free_queue_mem;

kernel/dma/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,13 +135,24 @@ config DMA_COHERENT_POOL
135135

136136
config DMA_GLOBAL_POOL
137137
select DMA_DECLARE_COHERENT
138+
depends on !ARCH_HAS_DMA_SET_UNCACHED
139+
depends on !DMA_DIRECT_REMAP
138140
bool
139141

140142
config DMA_DIRECT_REMAP
141143
bool
142144
select DMA_COHERENT_POOL
143145
select DMA_NONCOHERENT_MMAP
144146

147+
#
148+
# Fallback to arch code for DMA allocations. This should eventually go away.
149+
#
150+
config ARCH_HAS_DMA_ALLOC
151+
depends on !ARCH_HAS_DMA_SET_UNCACHED
152+
depends on !DMA_DIRECT_REMAP
153+
depends on !DMA_GLOBAL_POOL
154+
bool
155+
145156
config DMA_CMA
146157
bool "DMA Contiguous Memory Allocator"
147158
depends on HAVE_DMA_CONTIGUOUS && CMA

kernel/dma/debug.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ static const char *const maperr2str[] = {
139139

140140
static const char *type2name[] = {
141141
[dma_debug_single] = "single",
142-
[dma_debug_sg] = "scather-gather",
142+
[dma_debug_sg] = "scatter-gather",
143143
[dma_debug_coherent] = "coherent",
144144
[dma_debug_resource] = "resource",
145145
};

kernel/dma/direct.c

Lines changed: 13 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -220,13 +220,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
220220
return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
221221

222222
if (!dev_is_dma_coherent(dev)) {
223-
/*
224-
* Fallback to the arch handler if it exists. This should
225-
* eventually go away.
226-
*/
227-
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
228-
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
229-
!IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
223+
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
230224
!is_swiotlb_for_alloc(dev))
231225
return arch_dma_alloc(dev, size, dma_handle, gfp,
232226
attrs);
@@ -240,27 +234,24 @@ void *dma_direct_alloc(struct device *dev, size_t size,
240234
dma_handle);
241235

242236
/*
243-
* Otherwise remap if the architecture is asking for it. But
244-
* given that remapping memory is a blocking operation we'll
245-
* instead have to dip into the atomic pools.
237+
* Otherwise we require the architecture to either be able to
238+
* mark arbitrary parts of the kernel direct mapping uncached,
239+
* or remapped it uncached.
246240
*/
241+
set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
247242
remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
248-
if (remap) {
249-
if (dma_direct_use_pool(dev, gfp))
250-
return dma_direct_alloc_from_pool(dev, size,
251-
dma_handle, gfp);
252-
} else {
253-
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
254-
return NULL;
255-
set_uncached = true;
243+
if (!set_uncached && !remap) {
244+
pr_warn_once("coherent DMA allocations not supported on this platform.\n");
245+
return NULL;
256246
}
257247
}
258248

259249
/*
260-
* Decrypting memory may block, so allocate the memory from the atomic
261-
* pools if we can't block.
250+
* Remapping or decrypting memory may block, allocate the memory from
251+
* the atomic pools instead if we aren't allowed block.
262252
*/
263-
if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
253+
if ((remap || force_dma_unencrypted(dev)) &&
254+
dma_direct_use_pool(dev, gfp))
264255
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
265256

266257
/* we always manually zero the memory once we are done */
@@ -330,9 +321,7 @@ void dma_direct_free(struct device *dev, size_t size,
330321
return;
331322
}
332323

333-
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
334-
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
335-
!IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
324+
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
336325
!dev_is_dma_coherent(dev) &&
337326
!is_swiotlb_for_alloc(dev)) {
338327
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);

0 commit comments

Comments
 (0)