Skip to content

Commit 62bad54

Browse files
committed
Merge tag 'dma-mapping-6.3-2023-03-31' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - fix for swiotlb deadlock due to wrong alignment checks (GuoRui.Yu,
   Petr Tesarik)

* tag 'dma-mapping-6.3-2023-03-31' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix slot alignment checks
  swiotlb: use wrap_area_index() instead of open-coding it
  swiotlb: fix the deadlock in swiotlb_do_find_slots
2 parents 10f76dc + 0eee5ae commit 62bad54

File tree

1 file changed: +16 −13 lines

kernel/dma/swiotlb.c

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -625,39 +625,44 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 	unsigned int iotlb_align_mask =
 		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
 	unsigned int nslots = nr_slots(alloc_size), stride;
-	unsigned int index, wrap, count = 0, i;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+	unsigned int index, slots_checked, count = 0, i;
 	unsigned long flags;
 	unsigned int slot_base;
 	unsigned int slot_index;
 
 	BUG_ON(!nslots);
 	BUG_ON(area_index >= mem->nareas);
 
+	/*
+	 * For allocations of PAGE_SIZE or larger only look for page aligned
+	 * allocations.
+	 */
+	if (alloc_size >= PAGE_SIZE)
+		iotlb_align_mask &= PAGE_MASK;
+	iotlb_align_mask &= alloc_align_mask;
+
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
-	 * unaligned slots once we found an aligned one. For allocations of
-	 * PAGE_SIZE or larger only look for page aligned allocations.
+	 * unaligned slots once we found an aligned one.
 	 */
 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
-	if (alloc_size >= PAGE_SIZE)
-		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
-	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > mem->area_nslabs - area->used))
 		goto not_found;
 
 	slot_base = area_index * mem->area_nslabs;
-	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
+	index = area->index;
 
-	do {
+	for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
 		slot_index = slot_base + index;
 
 		if (orig_addr &&
 		    (slot_addr(tbl_dma_addr, slot_index) &
 		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
 			index = wrap_area_index(mem, index + 1);
+			slots_checked++;
 			continue;
 		}
 
@@ -673,7 +678,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 			goto found;
 		}
 		index = wrap_area_index(mem, index + stride);
-	} while (index != wrap);
+		slots_checked += stride;
+	}
 
 not_found:
 	spin_unlock_irqrestore(&area->lock, flags);
@@ -693,10 +699,7 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 	/*
 	 * Update the indices to avoid searching in the next round.
 	 */
-	if (index + nslots < mem->area_nslabs)
-		area->index = index + nslots;
-	else
-		area->index = 0;
+	area->index = wrap_area_index(mem, index + nslots);
 	area->used += nslots;
 	spin_unlock_irqrestore(&area->lock, flags);
 	return slot_index;

0 commit comments

Comments
 (0)