Skip to content

Commit 19c65c3

Browse files
Author: Christoph Hellwig (authored and committed)
dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma
Most of the dma_direct symbols should only be used by direct.c and mapping.c, so move them to kernel/dma. In fact more of dma-direct.h should eventually move, but that will require more coordination with other subsystems.

Signed-off-by: Christoph Hellwig <[email protected]>
1 parent a1fd09e commit 19c65c3

File tree

4 files changed

+121
-108
lines changed

4 files changed

+121
-108
lines changed

include/linux/dma-direct.h

Lines changed: 0 additions & 106 deletions
Original file line numberDiff line numberDiff line change
@@ -120,114 +120,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
120120
void dma_direct_free_pages(struct device *dev, size_t size,
121121
struct page *page, dma_addr_t dma_addr,
122122
enum dma_data_direction dir);
123-
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
124-
void *cpu_addr, dma_addr_t dma_addr, size_t size,
125-
unsigned long attrs);
126-
bool dma_direct_can_mmap(struct device *dev);
127-
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
128-
void *cpu_addr, dma_addr_t dma_addr, size_t size,
129-
unsigned long attrs);
130123
int dma_direct_supported(struct device *dev, u64 mask);
131-
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
132-
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
133-
enum dma_data_direction dir, unsigned long attrs);
134124
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
135125
size_t size, enum dma_data_direction dir, unsigned long attrs);
136-
size_t dma_direct_max_mapping_size(struct device *dev);
137126

138-
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
139-
defined(CONFIG_SWIOTLB)
140-
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
141-
int nents, enum dma_data_direction dir);
142-
#else
143-
static inline void dma_direct_sync_sg_for_device(struct device *dev,
144-
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
145-
{
146-
}
147-
#endif
148-
149-
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
150-
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
151-
defined(CONFIG_SWIOTLB)
152-
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
153-
int nents, enum dma_data_direction dir, unsigned long attrs);
154-
void dma_direct_sync_sg_for_cpu(struct device *dev,
155-
struct scatterlist *sgl, int nents, enum dma_data_direction dir);
156-
#else
157-
static inline void dma_direct_unmap_sg(struct device *dev,
158-
struct scatterlist *sgl, int nents, enum dma_data_direction dir,
159-
unsigned long attrs)
160-
{
161-
}
162-
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
163-
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
164-
{
165-
}
166-
#endif
167-
168-
static inline void dma_direct_sync_single_for_device(struct device *dev,
169-
dma_addr_t addr, size_t size, enum dma_data_direction dir)
170-
{
171-
phys_addr_t paddr = dma_to_phys(dev, addr);
172-
173-
if (unlikely(is_swiotlb_buffer(paddr)))
174-
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
175-
176-
if (!dev_is_dma_coherent(dev))
177-
arch_sync_dma_for_device(paddr, size, dir);
178-
}
179-
180-
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
181-
dma_addr_t addr, size_t size, enum dma_data_direction dir)
182-
{
183-
phys_addr_t paddr = dma_to_phys(dev, addr);
184-
185-
if (!dev_is_dma_coherent(dev)) {
186-
arch_sync_dma_for_cpu(paddr, size, dir);
187-
arch_sync_dma_for_cpu_all();
188-
}
189-
190-
if (unlikely(is_swiotlb_buffer(paddr)))
191-
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
192-
193-
if (dir == DMA_FROM_DEVICE)
194-
arch_dma_mark_clean(paddr, size);
195-
}
196-
197-
static inline dma_addr_t dma_direct_map_page(struct device *dev,
198-
struct page *page, unsigned long offset, size_t size,
199-
enum dma_data_direction dir, unsigned long attrs)
200-
{
201-
phys_addr_t phys = page_to_phys(page) + offset;
202-
dma_addr_t dma_addr = phys_to_dma(dev, phys);
203-
204-
if (unlikely(swiotlb_force == SWIOTLB_FORCE))
205-
return swiotlb_map(dev, phys, size, dir, attrs);
206-
207-
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
208-
if (swiotlb_force != SWIOTLB_NO_FORCE)
209-
return swiotlb_map(dev, phys, size, dir, attrs);
210-
211-
dev_WARN_ONCE(dev, 1,
212-
"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
213-
&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
214-
return DMA_MAPPING_ERROR;
215-
}
216-
217-
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
218-
arch_sync_dma_for_device(phys, size, dir);
219-
return dma_addr;
220-
}
221-
222-
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
223-
size_t size, enum dma_data_direction dir, unsigned long attrs)
224-
{
225-
phys_addr_t phys = dma_to_phys(dev, addr);
226-
227-
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
228-
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
229-
230-
if (unlikely(is_swiotlb_buffer(phys)))
231-
swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
232-
}
233127
#endif /* _LINUX_DMA_DIRECT_H */

kernel/dma/direct.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77
#include <linux/memblock.h> /* for max_pfn */
88
#include <linux/export.h>
99
#include <linux/mm.h>
10-
#include <linux/dma-direct.h>
1110
#include <linux/dma-map-ops.h>
1211
#include <linux/scatterlist.h>
1312
#include <linux/pfn.h>
1413
#include <linux/vmalloc.h>
1514
#include <linux/set_memory.h>
1615
#include <linux/slab.h>
16+
#include "direct.h"
1717

1818
/*
1919
* Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it

kernel/dma/direct.h

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2018 Christoph Hellwig.
4+
*
5+
* DMA operations that map physical memory directly without using an IOMMU.
6+
*/
7+
#ifndef _KERNEL_DMA_DIRECT_H
8+
#define _KERNEL_DMA_DIRECT_H
9+
10+
#include <linux/dma-direct.h>
11+
12+
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
13+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
14+
unsigned long attrs);
15+
bool dma_direct_can_mmap(struct device *dev);
16+
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
17+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
18+
unsigned long attrs);
19+
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
20+
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
21+
enum dma_data_direction dir, unsigned long attrs);
22+
size_t dma_direct_max_mapping_size(struct device *dev);
23+
24+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
25+
defined(CONFIG_SWIOTLB)
26+
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
27+
int nents, enum dma_data_direction dir);
28+
#else
29+
static inline void dma_direct_sync_sg_for_device(struct device *dev,
30+
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
31+
{
32+
}
33+
#endif
34+
35+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
36+
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
37+
defined(CONFIG_SWIOTLB)
38+
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
39+
int nents, enum dma_data_direction dir, unsigned long attrs);
40+
void dma_direct_sync_sg_for_cpu(struct device *dev,
41+
struct scatterlist *sgl, int nents, enum dma_data_direction dir);
42+
#else
43+
static inline void dma_direct_unmap_sg(struct device *dev,
44+
struct scatterlist *sgl, int nents, enum dma_data_direction dir,
45+
unsigned long attrs)
46+
{
47+
}
48+
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
49+
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
50+
{
51+
}
52+
#endif
53+
54+
static inline void dma_direct_sync_single_for_device(struct device *dev,
55+
dma_addr_t addr, size_t size, enum dma_data_direction dir)
56+
{
57+
phys_addr_t paddr = dma_to_phys(dev, addr);
58+
59+
if (unlikely(is_swiotlb_buffer(paddr)))
60+
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
61+
62+
if (!dev_is_dma_coherent(dev))
63+
arch_sync_dma_for_device(paddr, size, dir);
64+
}
65+
66+
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
67+
dma_addr_t addr, size_t size, enum dma_data_direction dir)
68+
{
69+
phys_addr_t paddr = dma_to_phys(dev, addr);
70+
71+
if (!dev_is_dma_coherent(dev)) {
72+
arch_sync_dma_for_cpu(paddr, size, dir);
73+
arch_sync_dma_for_cpu_all();
74+
}
75+
76+
if (unlikely(is_swiotlb_buffer(paddr)))
77+
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
78+
79+
if (dir == DMA_FROM_DEVICE)
80+
arch_dma_mark_clean(paddr, size);
81+
}
82+
83+
static inline dma_addr_t dma_direct_map_page(struct device *dev,
84+
struct page *page, unsigned long offset, size_t size,
85+
enum dma_data_direction dir, unsigned long attrs)
86+
{
87+
phys_addr_t phys = page_to_phys(page) + offset;
88+
dma_addr_t dma_addr = phys_to_dma(dev, phys);
89+
90+
if (unlikely(swiotlb_force == SWIOTLB_FORCE))
91+
return swiotlb_map(dev, phys, size, dir, attrs);
92+
93+
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
94+
if (swiotlb_force != SWIOTLB_NO_FORCE)
95+
return swiotlb_map(dev, phys, size, dir, attrs);
96+
97+
dev_WARN_ONCE(dev, 1,
98+
"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
99+
&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
100+
return DMA_MAPPING_ERROR;
101+
}
102+
103+
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
104+
arch_sync_dma_for_device(phys, size, dir);
105+
return dma_addr;
106+
}
107+
108+
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
109+
size_t size, enum dma_data_direction dir, unsigned long attrs)
110+
{
111+
phys_addr_t phys = dma_to_phys(dev, addr);
112+
113+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
114+
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
115+
116+
if (unlikely(is_swiotlb_buffer(phys)))
117+
swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
118+
}
119+
#endif /* _KERNEL_DMA_DIRECT_H */

kernel/dma/mapping.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,14 @@
77
*/
88
#include <linux/memblock.h> /* for max_pfn */
99
#include <linux/acpi.h>
10-
#include <linux/dma-direct.h>
1110
#include <linux/dma-map-ops.h>
1211
#include <linux/export.h>
1312
#include <linux/gfp.h>
1413
#include <linux/of_device.h>
1514
#include <linux/slab.h>
1615
#include <linux/vmalloc.h>
1716
#include "debug.h"
17+
#include "direct.h"
1818

1919
/*
2020
* Managed DMA API

0 commit comments

Comments
 (0)