/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

/* Number of address bits covered by ZONE_DMA; set up by arch code. */
extern unsigned int zone_dma_bits;
@@ -87,25 +93,17 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
87
93
unsigned long attrs );
88
94
int dma_direct_supported (struct device * dev , u64 mask );
89
95
bool dma_direct_need_sync (struct device * dev , dma_addr_t dma_addr );
90
- dma_addr_t dma_direct_map_page (struct device * dev , struct page * page ,
91
- unsigned long offset , size_t size , enum dma_data_direction dir ,
92
- unsigned long attrs );
93
96
int dma_direct_map_sg (struct device * dev , struct scatterlist * sgl , int nents ,
94
97
enum dma_data_direction dir , unsigned long attrs );
95
98
dma_addr_t dma_direct_map_resource (struct device * dev , phys_addr_t paddr ,
96
99
size_t size , enum dma_data_direction dir , unsigned long attrs );
100
+ size_t dma_direct_max_mapping_size (struct device * dev );
97
101
98
102
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE ) || \
99
103
defined(CONFIG_SWIOTLB )
100
- void dma_direct_sync_single_for_device (struct device * dev ,
101
- dma_addr_t addr , size_t size , enum dma_data_direction dir );
102
- void dma_direct_sync_sg_for_device (struct device * dev ,
103
- struct scatterlist * sgl , int nents , enum dma_data_direction dir );
104
+ void dma_direct_sync_sg_for_device (struct device * dev , struct scatterlist * sgl ,
105
+ int nents , enum dma_data_direction dir );
104
106
#else
105
- static inline void dma_direct_sync_single_for_device (struct device * dev ,
106
- dma_addr_t addr , size_t size , enum dma_data_direction dir )
107
- {
108
- }
109
107
static inline void dma_direct_sync_sg_for_device (struct device * dev ,
110
108
struct scatterlist * sgl , int nents , enum dma_data_direction dir )
111
109
{
@@ -115,34 +113,82 @@ static inline void dma_direct_sync_sg_for_device(struct device *dev,
115
113
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU ) || \
116
114
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL ) || \
117
115
defined(CONFIG_SWIOTLB )
118
- void dma_direct_unmap_page (struct device * dev , dma_addr_t addr ,
119
- size_t size , enum dma_data_direction dir , unsigned long attrs );
120
116
void dma_direct_unmap_sg (struct device * dev , struct scatterlist * sgl ,
121
117
int nents , enum dma_data_direction dir , unsigned long attrs );
122
- void dma_direct_sync_single_for_cpu (struct device * dev ,
123
- dma_addr_t addr , size_t size , enum dma_data_direction dir );
124
118
void dma_direct_sync_sg_for_cpu (struct device * dev ,
125
119
struct scatterlist * sgl , int nents , enum dma_data_direction dir );
126
120
#else
127
- static inline void dma_direct_unmap_page (struct device * dev , dma_addr_t addr ,
128
- size_t size , enum dma_data_direction dir , unsigned long attrs )
129
- {
130
- }
131
121
static inline void dma_direct_unmap_sg (struct device * dev ,
132
122
struct scatterlist * sgl , int nents , enum dma_data_direction dir ,
133
123
unsigned long attrs )
134
124
{
135
125
}
126
+ static inline void dma_direct_sync_sg_for_cpu (struct device * dev ,
127
+ struct scatterlist * sgl , int nents , enum dma_data_direction dir )
128
+ {
129
+ }
130
+ #endif
131
+
132
+ static inline void dma_direct_sync_single_for_device (struct device * dev ,
133
+ dma_addr_t addr , size_t size , enum dma_data_direction dir )
134
+ {
135
+ phys_addr_t paddr = dma_to_phys (dev , addr );
136
+
137
+ if (unlikely (is_swiotlb_buffer (paddr )))
138
+ swiotlb_tbl_sync_single (dev , paddr , size , dir , SYNC_FOR_DEVICE );
139
+
140
+ if (!dev_is_dma_coherent (dev ))
141
+ arch_sync_dma_for_device (paddr , size , dir );
142
+ }
143
+
136
144
static inline void dma_direct_sync_single_for_cpu (struct device * dev ,
137
145
dma_addr_t addr , size_t size , enum dma_data_direction dir )
138
146
{
147
+ phys_addr_t paddr = dma_to_phys (dev , addr );
148
+
149
+ if (!dev_is_dma_coherent (dev )) {
150
+ arch_sync_dma_for_cpu (paddr , size , dir );
151
+ arch_sync_dma_for_cpu_all ();
152
+ }
153
+
154
+ if (unlikely (is_swiotlb_buffer (paddr )))
155
+ swiotlb_tbl_sync_single (dev , paddr , size , dir , SYNC_FOR_CPU );
139
156
}
140
- static inline void dma_direct_sync_sg_for_cpu (struct device * dev ,
141
- struct scatterlist * sgl , int nents , enum dma_data_direction dir )
157
+
158
+ static inline dma_addr_t dma_direct_map_page (struct device * dev ,
159
+ struct page * page , unsigned long offset , size_t size ,
160
+ enum dma_data_direction dir , unsigned long attrs )
142
161
{
162
+ phys_addr_t phys = page_to_phys (page ) + offset ;
163
+ dma_addr_t dma_addr = phys_to_dma (dev , phys );
164
+
165
+ if (unlikely (swiotlb_force == SWIOTLB_FORCE ))
166
+ return swiotlb_map (dev , phys , size , dir , attrs );
167
+
168
+ if (unlikely (!dma_capable (dev , dma_addr , size , true))) {
169
+ if (swiotlb_force != SWIOTLB_NO_FORCE )
170
+ return swiotlb_map (dev , phys , size , dir , attrs );
171
+
172
+ dev_WARN_ONCE (dev , 1 ,
173
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n" ,
174
+ & dma_addr , size , * dev -> dma_mask , dev -> bus_dma_limit );
175
+ return DMA_MAPPING_ERROR ;
176
+ }
177
+
178
+ if (!dev_is_dma_coherent (dev ) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC ))
179
+ arch_sync_dma_for_device (phys , size , dir );
180
+ return dma_addr ;
143
181
}
144
- #endif
145
182
146
- size_t dma_direct_max_mapping_size (struct device * dev );
183
+ static inline void dma_direct_unmap_page (struct device * dev , dma_addr_t addr ,
184
+ size_t size , enum dma_data_direction dir , unsigned long attrs )
185
+ {
186
+ phys_addr_t phys = dma_to_phys (dev , addr );
147
187
188
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC ))
189
+ dma_direct_sync_single_for_cpu (dev , addr , size , dir );
190
+
191
+ if (unlikely (is_swiotlb_buffer (phys )))
192
+ swiotlb_tbl_unmap_single (dev , phys , size , size , dir , attrs );
193
+ }
148
194
#endif /* _LINUX_DMA_DIRECT_H */