
Commit 0b9f2bd

ALSA: memalloc: Use proper DMA mapping API for x86 S/G buffer allocations
The fallback S/G buffer allocation for x86 blindly used the addresses deduced from the page allocations. This broke allocations behind an IOMMU and forced us to work around it with a hackish DMA ops check. To clean up this mess, this patch switches to proper DMA mapping API usage with a standard sg-table instead.

With the sg-table in place, the DMA address table is no longer needed; to keep the original allocation sizes around for freeing, it is replaced with an array holding the number of pages per chunk.

The get_addr callback is changed to reuse the existing helper for non-contiguous buffers. (This is also the reason the sg_table is placed at the beginning of struct snd_dma_sg_fallback.)

Finally, the hackish workaround that checks the DMA ops is dropped.

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
1 parent c880a51 commit 0b9f2bd
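In essence, the new fallback path gathers the per-chunk pages into a standard sg_table, lets dma_map_sgtable() establish the device-visible addresses (going through the IOMMU when one is present), and only then vmaps the pages for the CPU. A minimal sketch of that flow using the same kernel APIs as the patch; the helper name and signature are made up for illustration, not the verbatim kernel function:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

/* illustrative only -- a sketch of the flow this commit introduces */
static void *sg_fallback_map_sketch(struct device *dev, struct sg_table *sgt,
				    struct page **pages, unsigned int count)
{
	void *p;

	/* build one sg_table over all chunk pages; contiguous runs merge */
	if (sg_alloc_table_from_pages(sgt, pages, count, 0,
				      (size_t)count << PAGE_SHIFT, GFP_KERNEL))
		return NULL;

	/* the DMA API (and the IOMMU, if any) assigns the device addresses */
	if (dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0))
		goto free_table;

	/* linear CPU-side view over the scattered pages */
	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (p)
		return p;

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
free_table:
	sg_free_table(sgt);
	return NULL;
}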

File tree

1 file changed (+36 −42 lines)

sound/core/memalloc.c

Lines changed: 36 additions & 42 deletions
@@ -680,43 +680,43 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
+	struct sg_table sgt; /* used by get_addr - must be the first item */
 	size_t count;
 	struct page **pages;
-	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
-	dma_addr_t *addrs;
+	unsigned int *npages;
 };
 
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 				       struct snd_dma_sg_fallback *sgbuf)
 {
+	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
 	size_t i, size;
 
-	if (sgbuf->pages && sgbuf->addrs) {
+	if (sgbuf->pages && sgbuf->npages) {
 		i = 0;
 		while (i < sgbuf->count) {
-			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
-				break;
-			size = sgbuf->addrs[i] & ~PAGE_MASK;
-			if (WARN_ON(!size))
+			size = sgbuf->npages[i];
+			if (!size)
 				break;
 			do_free_pages(page_address(sgbuf->pages[i]),
-				      size << PAGE_SHIFT, false);
+				      size << PAGE_SHIFT, wc);
 			i += size;
 		}
 	}
 	kvfree(sgbuf->pages);
-	kvfree(sgbuf->addrs);
+	kvfree(sgbuf->npages);
 	kfree(sgbuf);
 }
 
 /* fallback manual S/G buffer allocations */
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
+	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
 	struct snd_dma_sg_fallback *sgbuf;
 	struct page **pagep, *curp;
-	size_t chunk, npages;
-	dma_addr_t *addrp;
+	size_t chunk;
 	dma_addr_t addr;
+	unsigned int idx, npages;
 	void *p;
 
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
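Placing sgt first means that dmab->private_data, which actually points at a struct snd_dma_sg_fallback, can also be read as a plain struct sg_table by the shared non-contiguous get_addr helper. A simplified sketch of what such a helper does (the real snd_dma_noncontig_get_addr in memalloc.c is more elaborate; this only shows the idea):

/* simplified; assumes dmab->private_data starts with a struct sg_table */
static dma_addr_t get_addr_sketch(struct snd_dma_buffer *dmab, size_t offset)
{
	struct sg_table *sgt = dmab->private_data;
	struct scatterlist *sg;
	unsigned int i;

	/* walk the DMA-mapped entries until the offset falls inside one */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (offset < sg_dma_len(sg))
			return sg_dma_address(sg) + offset;
		offset -= sg_dma_len(sg);
	}
	return 0; /* offset out of range */
}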
@@ -725,16 +725,16 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	size = PAGE_ALIGN(size);
 	sgbuf->count = size >> PAGE_SHIFT;
 	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
-	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-	if (!sgbuf->pages || !sgbuf->addrs)
+	sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
+	if (!sgbuf->pages || !sgbuf->npages)
 		goto error;
 
 	pagep = sgbuf->pages;
-	addrp = sgbuf->addrs;
-	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
+	chunk = size;
+	idx = 0;
 	while (size > 0) {
 		chunk = min(size, chunk);
-		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
 		if (!p) {
 			if (chunk <= PAGE_SIZE)
 				goto error;
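The odd-looking cap that disappears here, chunk = (PAGE_SIZE - 1) << PAGE_SHIFT, existed only because the old code smuggled each chunk's page count into the low bits of its page-aligned DMA address. Roughly, assuming 4 KiB pages (the address values below are made-up examples):

/* old encoding, removed by this commit:
 *
 *   addrs[i] = dma_addr | npages;       e.g. 0xfe70000 | 16
 *   npages   = addrs[i] & ~PAGE_MASK;   -> 16
 *   dma_addr = addrs[i] &  PAGE_MASK;   -> 0xfe70000
 *
 * with only 12 free low bits, one chunk could describe at most
 * PAGE_SIZE - 1 (4095) pages, hence the old cap; the new npages[]
 * array has no such limit, so the first attempt can try the whole
 * requested size as a single chunk.
 */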
@@ -746,27 +746,33 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 		size -= chunk;
 		/* fill pages */
 		npages = chunk >> PAGE_SHIFT;
-		*addrp = npages; /* store in lower bits */
+		sgbuf->npages[idx] = npages;
+		idx += npages;
 		curp = virt_to_page(p);
-		while (npages--) {
+		while (npages--)
 			*pagep++ = curp++;
-			*addrp++ |= addr;
-			addr += PAGE_SIZE;
-		}
 	}
 
-	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
-	if (!p)
+	if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
+				      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
 		goto error;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		set_pages_array_wc(sgbuf->pages, sgbuf->count);
+	if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
+		goto error_dma_map;
+
+	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+	if (!p)
+		goto error_vmap;
 
 	dmab->private_data = sgbuf;
 	/* store the first page address for convenience */
-	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
+	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
 	return p;
 
+error_vmap:
+	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+error_dma_map:
+	sg_free_table(&sgbuf->sgt);
 error:
 	__snd_dma_sg_fallback_free(dmab, sgbuf);
 	return NULL;
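Note that dmab->addr is now derived from the mapped sg_table via snd_sgbuf_get_addr() rather than from the raw page address: behind an IOMMU this yields the actual device-visible address, which is precisely what the old blind computation got wrong. The labeled error path also unwinds in reverse order of setup (vmap, then the DMA mapping, then the table itself), mirroring the teardown in snd_dma_sg_fallback_free() below.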
@@ -776,21 +782,12 @@ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
 {
 	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		set_pages_array_wb(sgbuf->pages, sgbuf->count);
 	vunmap(dmab->area);
+	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+	sg_free_table(&sgbuf->sgt);
 	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
 }
 
-static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
-					       size_t offset)
-{
-	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
-	size_t index = offset >> PAGE_SHIFT;
-
-	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
-}
-
 static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
 				    struct vm_area_struct *area)
 {
@@ -816,18 +813,15 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 		return p;
 
 	dmab->dev.type = type; /* restore the type */
-	/* if IOMMU is present but failed, give up */
-	if (get_dma_ops(dmab->dev.dev))
-		return NULL;
-	/* try fallback */
 	return snd_dma_sg_fallback_alloc(dmab, size);
 }
 
 static const struct snd_malloc_ops snd_dma_sg_ops = {
 	.alloc = snd_dma_sg_alloc,
 	.free = snd_dma_sg_fallback_free,
 	.mmap = snd_dma_sg_fallback_mmap,
-	.get_addr = snd_dma_sg_fallback_get_addr,
+	/* reuse noncontig helper */
+	.get_addr = snd_dma_noncontig_get_addr,
 	/* reuse vmalloc helpers */
 	.get_page = snd_dma_vmalloc_get_page,
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
