Skip to content

Commit 51249b1

Browse files
jgross1 and aaron-ang
authored and committed
xen/swiotlb: relax alignment requirements
When mapping a buffer for DMA via .map_page or .map_sg DMA operations, there is no need to check the machine frames to be aligned according to the mapped areas size. All that is needed in these cases is that the buffer is contiguous at machine level. So carve out the alignment check from range_straddles_page_boundary() and move it to a helper called by xen_swiotlb_alloc_coherent() and xen_swiotlb_free_coherent() directly. Fixes: 9f40ec8 ("xen/swiotlb: add alignment check for dma buffers") Reported-by: Jan Vejvalka <[email protected]> Tested-by: Jan Vejvalka <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
1 parent fd1fef7 commit 51249b1

File tree

1 file changed

+12
-8
lines changed

1 file changed

+12
-8
lines changed

drivers/xen/swiotlb-xen.c

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
7474
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
7575
}
7676

77+
static inline bool range_requires_alignment(phys_addr_t p, size_t size)
78+
{
79+
phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
80+
phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
81+
82+
return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
83+
}
84+
7785
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
7886
{
7987
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
8088
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
81-
phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
8289

8390
next_bfn = pfn_to_bfn(xen_pfn);
8491

85-
/* If buffer is physically aligned, ensure DMA alignment. */
86-
if (IS_ALIGNED(p, algn) &&
87-
!IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
88-
return 1;
89-
9092
for (i = 1; i < nr_pages; i++)
9193
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
9294
return 1;
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
156158

157159
*dma_handle = xen_phys_to_dma(dev, phys);
158160
if (*dma_handle + size - 1 > dma_mask ||
159-
range_straddles_page_boundary(phys, size)) {
161+
range_straddles_page_boundary(phys, size) ||
162+
range_requires_alignment(phys, size)) {
160163
if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
161164
dma_handle) != 0)
162165
goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
182185
size = ALIGN(size, XEN_PAGE_SIZE);
183186

184187
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
185-
WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
188+
WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
189+
range_requires_alignment(phys, size)))
186190
return;
187191

188192
if (TestClearPageXenRemapped(virt_to_page(vaddr)))

0 commit comments

Comments
 (0)