@@ -680,43 +680,43 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
+	struct sg_table sgt; /* used by get_addr - must be the first item */
 	size_t count;
 	struct page **pages;
-	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
-	dma_addr_t *addrs;
+	unsigned int *npages;
 };
 
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
 {
+	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
 	size_t i, size;
 
-	if (sgbuf->pages && sgbuf->addrs) {
+	if (sgbuf->pages && sgbuf->npages) {
 		i = 0;
 		while (i < sgbuf->count) {
-			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
-				break;
-			size = sgbuf->addrs[i] & ~PAGE_MASK;
-			if (WARN_ON(!size))
+			size = sgbuf->npages[i];
+			if (!size)
 				break;
 			do_free_pages(page_address(sgbuf->pages[i]),
-				      size << PAGE_SHIFT, false);
+				      size << PAGE_SHIFT, wc);
 			i += size;
 		}
 	}
 	kvfree(sgbuf->pages);
-	kvfree(sgbuf->addrs);
+	kvfree(sgbuf->npages);
 	kfree(sgbuf);
 }
 
 /* fallback manual S/G buffer allocations */
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
+	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
 	struct snd_dma_sg_fallback *sgbuf;
 	struct page **pagep, *curp;
-	size_t chunk, npages;
-	dma_addr_t *addrp;
+	size_t chunk;
 	dma_addr_t addr;
+	unsigned int idx, npages;
 	void *p;
 
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
@@ -725,16 +725,16 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	size = PAGE_ALIGN(size);
 	sgbuf->count = size >> PAGE_SHIFT;
 	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
-	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-	if (!sgbuf->pages || !sgbuf->addrs)
+	sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
+	if (!sgbuf->pages || !sgbuf->npages)
 		goto error;
 
 	pagep = sgbuf->pages;
-	addrp = sgbuf->addrs;
-	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
+	chunk = size;
+	idx = 0;
 	while (size > 0) {
 		chunk = min(size, chunk);
-		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
 		if (!p) {
 			if (chunk <= PAGE_SIZE)
 				goto error;
@@ -746,27 +746,33 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 		size -= chunk;
 		/* fill pages */
 		npages = chunk >> PAGE_SHIFT;
-		*addrp = npages; /* store in lower bits */
+		sgbuf->npages[idx] = npages;
+		idx += npages;
 		curp = virt_to_page(p);
-		while (npages--) {
+		while (npages--)
 			*pagep++ = curp++;
-			*addrp++ |= addr;
-			addr += PAGE_SIZE;
-		}
 	}
 
-	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
-	if (!p)
+	if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
+				      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
 		goto error;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		set_pages_array_wc(sgbuf->pages, sgbuf->count);
+	if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
+		goto error_dma_map;
+
+	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+	if (!p)
+		goto error_vmap;
 
 	dmab->private_data = sgbuf;
 	/* store the first page address for convenience */
-	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
+	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
 	return p;
 
+ error_vmap:
+	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+ error_dma_map:
+	sg_free_table(&sgbuf->sgt);
  error:
 	__snd_dma_sg_fallback_free(dmab, sgbuf);
 	return NULL;
@@ -776,21 +782,12 @@ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
 {
 	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		set_pages_array_wb(sgbuf->pages, sgbuf->count);
 	vunmap(dmab->area);
+	dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+	sg_free_table(&sgbuf->sgt);
 	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
 }
 
-static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
-					       size_t offset)
-{
-	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
-	size_t index = offset >> PAGE_SHIFT;
-
-	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
-}
-
 static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
 {
@@ -816,18 +813,15 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 		return p;
 
 	dmab->dev.type = type; /* restore the type */
-	/* if IOMMU is present but failed, give up */
-	if (get_dma_ops(dmab->dev.dev))
-		return NULL;
-	/* try fallback */
 	return snd_dma_sg_fallback_alloc(dmab, size);
 }
 
 static const struct snd_malloc_ops snd_dma_sg_ops = {
 	.alloc = snd_dma_sg_alloc,
 	.free = snd_dma_sg_fallback_free,
 	.mmap = snd_dma_sg_fallback_mmap,
-	.get_addr = snd_dma_sg_fallback_get_addr,
+	/* reuse noncontig helper */
+	.get_addr = snd_dma_noncontig_get_addr,
 	/* reuse vmalloc helpers */
 	.get_page = snd_dma_vmalloc_get_page,
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
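
Note on the get_addr change: sgt is placed as the first member of struct snd_dma_sg_fallback so that dmab->private_data can be treated as a struct sg_table by the shared helper, which is why .get_addr can simply point at snd_dma_noncontig_get_addr(). As a rough illustration of what such an sg_table-backed lookup does, here is a simplified linear walk over the DMA-mapped entries (this is a sketch for explanation only, not the kernel's actual helper; the function name is hypothetical, and it assumes the table was already mapped with dma_map_sgtable()):

#include <linux/scatterlist.h>

/*
 * Illustrative only: translate a byte offset into a buffer backed by a
 * DMA-mapped sg_table to the corresponding DMA (bus) address by walking
 * the mapped entries.  Mapped entries may cover more than one page if
 * the IOMMU/DMA layer coalesced them, so the walk is length-based.
 */
static dma_addr_t example_sgt_get_addr(struct sg_table *sgt, size_t offset)
{
	struct scatterlist *sg;
	int i;

	/* iterate only the DMA-mapped entries (nents, not orig_nents) */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (offset < sg_dma_len(sg))
			return sg_dma_address(sg) + offset;
		offset -= sg_dma_len(sg);
	}
	return 0;	/* offset lies beyond the end of the buffer */
}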
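
From a driver's point of view the interface is unchanged: an SG buffer is still requested through the generic memalloc API and per-offset DMA addresses are obtained with snd_sgbuf_get_addr(). A minimal, hypothetical usage sketch (the helper name, device pointer, and buffer size are illustrative, not taken from this change):

#include <linux/device.h>
#include <linux/printk.h>
#include <sound/memalloc.h>

/* Hypothetical example: allocate a 256 KiB SG buffer for 'dev' and look up
 * the DMA address one page into it, then release the buffer again. */
static int example_alloc_sg(struct device *dev)
{
	struct snd_dma_buffer dmab;
	dma_addr_t addr;
	int err;

	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, 256 * 1024, &dmab);
	if (err < 0)
		return err;

	addr = snd_sgbuf_get_addr(&dmab, PAGE_SIZE);
	pr_info("DMA address at offset PAGE_SIZE: %pad\n", &addr);

	snd_dma_free_pages(&dmab);
	return 0;
}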