@@ -36,7 +36,6 @@
 #include <xen/hvc-console.h>
 
 #include <asm/dma-mapping.h>
-#include <asm/xen/page-coherent.h>
 
 #include <trace/events/swiotlb.h>
 #define MAX_DMA_BITS 32
@@ -104,6 +103,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }
 
+#ifdef CONFIG_X86
 int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
@@ -131,94 +131,58 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 }
 
 static void *
-xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags,
-			   unsigned long attrs)
+xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-	void *ret;
+	u64 dma_mask = dev->coherent_dma_mask;
 	int order = get_order(size);
-	u64 dma_mask = DMA_BIT_MASK(32);
 	phys_addr_t phys;
-	dma_addr_t dev_addr;
-
-	/*
-	 * Ignore region specifiers - the kernel's ideas of
-	 * pseudo-phys memory layout has nothing to do with the
-	 * machine physical layout.  We can't allocate highmem
-	 * because we can't return a pointer to it.
-	 */
-	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	void *ret;
 
-	/* Convert the size to actually allocated. */
+	/* Align the allocation to the Xen page size */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	/* On ARM this function returns an ioremap'ped virtual address for
-	 * which virt_to_phys doesn't return the corresponding physical
-	 * address. In fact on ARM virt_to_phys only works for kernel direct
-	 * mapped RAM memory. Also see comment below.
-	 */
-	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
-
+	ret = (void *)__get_free_pages(flags, get_order(size));
 	if (!ret)
 		return ret;
-
-	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = hwdev->coherent_dma_mask;
-
-	/* At this point dma_handle is the dma address, next we are
-	 * going to set it to the machine address.
-	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
-	 * to *dma_handle. */
-	phys = dma_to_phys(hwdev, *dma_handle);
-	dev_addr = xen_phys_to_dma(hwdev, phys);
-	if (((dev_addr + size - 1 <= dma_mask)) &&
-	    !range_straddles_page_boundary(phys, size))
-		*dma_handle = dev_addr;
-	else {
-		if (xen_create_contiguous_region(phys, order,
-						 fls64(dma_mask), dma_handle) != 0) {
-			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
-			return NULL;
-		}
-		*dma_handle = phys_to_dma(hwdev, *dma_handle);
+	phys = virt_to_phys(ret);
+
+	*dma_handle = xen_phys_to_dma(dev, phys);
+	if (*dma_handle + size - 1 > dma_mask ||
+	    range_straddles_page_boundary(phys, size)) {
+		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+				dma_handle) != 0)
+			goto out_free_pages;
 		SetPageXenRemapped(virt_to_page(ret));
 	}
+
 	memset(ret, 0, size);
 	return ret;
+
+out_free_pages:
+	free_pages((unsigned long)ret, get_order(size));
+	return NULL;
 }
 
 static void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			  dma_addr_t dev_addr, unsigned long attrs)
+xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
 {
+	phys_addr_t phys = virt_to_phys(vaddr);
 	int order = get_order(size);
-	phys_addr_t phys;
-	u64 dma_mask = DMA_BIT_MASK(32);
-	struct page *page;
-
-	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = hwdev->coherent_dma_mask;
-
-	/* do not use virt_to_phys because on ARM it doesn't return you the
-	 * physical address */
-	phys = xen_dma_to_phys(hwdev, dev_addr);
 
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	if (is_vmalloc_addr(vaddr))
-		page = vmalloc_to_page(vaddr);
-	else
-		page = virt_to_page(vaddr);
+	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+		return;
 
-	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
-		     range_straddles_page_boundary(phys, size)) &&
-	    TestClearPageXenRemapped(page))
+	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
 		xen_destroy_contiguous_region(phys, order);
-
-	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
-			attrs);
+	free_pages((unsigned long)vaddr, get_order(size));
 }
+#endif /* CONFIG_X86 */
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
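For context, drivers never call these ops directly: they allocate through the generic DMA API, which dispatches to xen_swiotlb_dma_ops.alloc/.free (on x86, the xen_swiotlb_alloc_coherent()/xen_swiotlb_free_coherent() pair above). A minimal sketch of such a caller follows; the device pointer "my_dev", the function name, and the 4 KiB size are illustrative assumptions, not part of this patch.

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Illustrative caller only; "my_dev" and SZ_4K are assumptions.
	 * Under Xen this request is routed through xen_swiotlb_dma_ops.alloc,
	 * i.e. xen_swiotlb_alloc_coherent() above on x86 (dma_direct_alloc
	 * elsewhere after this change).
	 */
	static int example_coherent_alloc(struct device *my_dev)
	{
		dma_addr_t dma_handle;
		void *cpu_addr;

		cpu_addr = dma_alloc_coherent(my_dev, SZ_4K, &dma_handle,
					      GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* ... program dma_handle into the device, access cpu_addr
		 * from the CPU ... */

		dma_free_coherent(my_dev, SZ_4K, cpu_addr, dma_handle);
		return 0;
	}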
@@ -421,8 +385,13 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
+#ifdef CONFIG_X86
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
+#else
+	.alloc = dma_direct_alloc,
+	.free = dma_direct_free,
+#endif
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
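The fallback to xen_create_contiguous_region() in the allocator above hinges on range_straddles_page_boundary(): under Xen, consecutive pseudo-physical frames need not be backed by consecutive machine frames, so a buffer spanning several XEN_PAGE_SIZE frames is only DMA-safe if its backing machine frames happen to be contiguous. A hedged sketch of that kind of walk, using the XEN_PFN_DOWN/XEN_PFN_UP/xen_offset_in_page/pfn_to_bfn helpers from xen/page.h; treat the body as an illustration of the idea, not the exact upstream implementation.

	#include <xen/page.h>	/* XEN_PFN_DOWN/UP, xen_offset_in_page, pfn_to_bfn */

	/* Sketch: return 1 if [p, p + size) covers Xen page frames whose
	 * machine (bus) frame numbers are not consecutive, i.e. the range
	 * cannot be handed to a device as one contiguous DMA region.
	 */
	static int straddles_sketch(phys_addr_t p, size_t size)
	{
		unsigned long xen_pfn = XEN_PFN_DOWN(p);
		unsigned long next_bfn = pfn_to_bfn(xen_pfn);
		unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

		for (i = 1; i < nr_pages; i++)
			if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
				return 1;	/* not machine-contiguous */
		return 0;
	}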