Skip to content

Commit 0d5d746

Browse files
committed
Merge tag 'sound-fix-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
Pull sound fixes from Takashi Iwai:
 "A collection of fixes for 5.16-rc1, notably for a few regressions
  that were found in 5.15 and pre-rc1:

   - revert of the unification of SG-buffer helper functions on x86 and
     the relevant fix

   - regression fixes for mmap after the recent code refactoring

   - two NULL dereference fixes in HD-audio controller driver

   - UAF fixes in ALSA timer core

   - a few usual HD-audio and FireWire quirks"

* tag 'sound-fix-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: fireworks: add support for Loud Onyx 1200f quirk
  ALSA: hda: fix general protection fault in azx_runtime_idle
  ALSA: hda: Free card instance properly at probe errors
  ALSA: hda/realtek: Add quirk for HP EliteBook 840 G7 mute LED
  ALSA: memalloc: Remove a stale comment
  ALSA: synth: missing check for possible NULL after the call to kstrdup
  ALSA: memalloc: Use proper SG helpers for noncontig allocations
  ALSA: pci: rme: Fix unaligned buffer addresses
  ALSA: firewire-motu: add support for MOTU Track 16
  ALSA: PCM: Fix NULL dereference at mmap checks
  ALSA: hda/realtek: Add quirk for ASUS UX550VE
  ALSA: timer: Unconditionally unlink slave instances, too
  ALSA: memalloc: Catch call with NULL snd_dma_buffer pointer
  Revert "ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type"
  ALSA: hda/realtek: Add a quirk for Acer Spin SP513-54N
  ALSA: firewire-motu: add support for MOTU Traveler mk3
  ALSA: hda/realtek: Headset fixup for Clevo NH77HJQ
  ALSA: timer: Fix use-after-free problem
2 parents 304ac80 + 0ca3727 commit 0d5d746

File tree

15 files changed: +386 additions, -102 deletions

include/sound/memalloc.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,13 @@ struct snd_dma_device {
3636
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
3737
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
3838
#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
39+
#ifdef CONFIG_SND_DMA_SGBUF
40+
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
41+
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
42+
#else
43+
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
44+
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
45+
#endif
3946
#ifdef CONFIG_GENERIC_ALLOCATOR
4047
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
4148
#else
@@ -44,13 +51,6 @@ struct snd_dma_device {
4451
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
4552
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
4653
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
47-
#ifdef CONFIG_SND_DMA_SGBUF
48-
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
49-
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
50-
#else
51-
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
52-
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
53-
#endif
5454

5555
/*
5656
* info for buffer allocation

sound/core/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
1919
snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
2020
pcm_memory.o memalloc.o
2121
snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
22+
snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
2223
snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
2324
snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
2425

sound/core/memalloc.c

Lines changed: 62 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -183,8 +183,11 @@ EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
183183
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
184184
struct vm_area_struct *area)
185185
{
186-
const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
186+
const struct snd_malloc_ops *ops;
187187

188+
if (!dmab)
189+
return -ENOENT;
190+
ops = snd_dma_get_ops(dmab);
188191
if (ops && ops->mmap)
189192
return ops->mmap(dmab, area);
190193
else
@@ -549,60 +552,73 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
549552
}
550553
}
551554

552-
static const struct snd_malloc_ops snd_dma_noncontig_ops = {
553-
.alloc = snd_dma_noncontig_alloc,
554-
.free = snd_dma_noncontig_free,
555-
.mmap = snd_dma_noncontig_mmap,
556-
.sync = snd_dma_noncontig_sync,
557-
/* re-use vmalloc helpers for get_* ops */
558-
.get_addr = snd_dma_vmalloc_get_addr,
559-
.get_page = snd_dma_vmalloc_get_page,
560-
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
561-
};
555+
static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
556+
struct sg_page_iter *piter,
557+
size_t offset)
558+
{
559+
struct sg_table *sgt = dmab->private_data;
562560

563-
/* x86-specific SG-buffer with WC pages */
564-
#ifdef CONFIG_SND_DMA_SGBUF
565-
#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
561+
__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
562+
offset >> PAGE_SHIFT);
563+
}
566564

567-
static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
565+
static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
566+
size_t offset)
568567
{
569-
void *p = snd_dma_noncontig_alloc(dmab, size);
570-
size_t ofs;
568+
struct sg_dma_page_iter iter;
571569

572-
if (!p)
573-
return NULL;
574-
for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
575-
set_memory_uc(vmalloc_to_virt(p + ofs), 1);
576-
return p;
570+
snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
571+
__sg_page_iter_dma_next(&iter);
572+
return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
577573
}
578574

579-
static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
575+
static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
576+
size_t offset)
580577
{
581-
size_t ofs;
578+
struct sg_page_iter iter;
582579

583-
for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
584-
set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
585-
snd_dma_noncontig_free(dmab);
580+
snd_dma_noncontig_iter_set(dmab, &iter, offset);
581+
__sg_page_iter_next(&iter);
582+
return sg_page_iter_page(&iter);
586583
}
587584

588-
static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
589-
struct vm_area_struct *area)
585+
static unsigned int
586+
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
587+
unsigned int ofs, unsigned int size)
590588
{
591-
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
592-
/* FIXME: dma_mmap_noncontiguous() works? */
593-
return -ENOENT; /* continue with the default mmap handler */
589+
struct sg_dma_page_iter iter;
590+
unsigned int start, end;
591+
unsigned long addr;
592+
593+
start = ALIGN_DOWN(ofs, PAGE_SIZE);
594+
end = ofs + size - 1; /* the last byte address */
595+
snd_dma_noncontig_iter_set(dmab, &iter.base, start);
596+
if (!__sg_page_iter_dma_next(&iter))
597+
return 0;
598+
/* check page continuity */
599+
addr = sg_page_iter_dma_address(&iter);
600+
for (;;) {
601+
start += PAGE_SIZE;
602+
if (start > end)
603+
break;
604+
addr += PAGE_SIZE;
605+
if (!__sg_page_iter_dma_next(&iter) ||
606+
sg_page_iter_dma_address(&iter) != addr)
607+
return start - ofs;
608+
}
609+
/* ok, all on continuous pages */
610+
return size;
594611
}
595612

596-
const struct snd_malloc_ops snd_dma_sg_wc_ops = {
597-
.alloc = snd_dma_sg_wc_alloc,
598-
.free = snd_dma_sg_wc_free,
599-
.mmap = snd_dma_sg_wc_mmap,
613+
static const struct snd_malloc_ops snd_dma_noncontig_ops = {
614+
.alloc = snd_dma_noncontig_alloc,
615+
.free = snd_dma_noncontig_free,
616+
.mmap = snd_dma_noncontig_mmap,
600617
.sync = snd_dma_noncontig_sync,
601-
.get_addr = snd_dma_vmalloc_get_addr,
602-
.get_page = snd_dma_vmalloc_get_page,
603-
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
618+
.get_addr = snd_dma_noncontig_get_addr,
619+
.get_page = snd_dma_noncontig_get_page,
620+
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
604621
};
605-
#endif /* CONFIG_SND_DMA_SGBUF */
606622

607623
/*
608624
* Non-coherent pages allocator
@@ -663,17 +679,20 @@ static const struct snd_malloc_ops *dma_ops[] = {
663679
[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
664680
[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
665681
[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
666-
#ifdef CONFIG_SND_DMA_SGBUF
667-
[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
668-
#endif
669682
#ifdef CONFIG_GENERIC_ALLOCATOR
670683
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
671684
#endif /* CONFIG_GENERIC_ALLOCATOR */
672685
#endif /* CONFIG_HAS_DMA */
686+
#ifdef CONFIG_SND_DMA_SGBUF
687+
[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
688+
[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
689+
#endif
673690
};
674691

675692
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
676693
{
694+
if (WARN_ON_ONCE(!dmab))
695+
return NULL;
677696
if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
678697
dmab->dev.type >= ARRAY_SIZE(dma_ops)))
679698
return NULL;

sound/core/sgbuf.c

Lines changed: 201 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,201 @@
1+
// SPDX-License-Identifier: GPL-2.0-or-later
2+
/*
3+
* Scatter-Gather buffer
4+
*
5+
* Copyright (c) by Takashi Iwai <[email protected]>
6+
*/
7+
8+
#include <linux/slab.h>
9+
#include <linux/mm.h>
10+
#include <linux/vmalloc.h>
11+
#include <linux/export.h>
12+
#include <sound/memalloc.h>
13+
#include "memalloc_local.h"
14+
15+
struct snd_sg_page {
16+
void *buf;
17+
dma_addr_t addr;
18+
};
19+
20+
struct snd_sg_buf {
21+
int size; /* allocated byte size */
22+
int pages; /* allocated pages */
23+
int tblsize; /* allocated table size */
24+
struct snd_sg_page *table; /* address table */
25+
struct page **page_table; /* page table (for vmap/vunmap) */
26+
struct device *dev;
27+
};
28+
29+
/* table entries are align to 32 */
30+
#define SGBUF_TBL_ALIGN 32
31+
#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
32+
33+
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
34+
{
35+
struct snd_sg_buf *sgbuf = dmab->private_data;
36+
struct snd_dma_buffer tmpb;
37+
int i;
38+
39+
if (!sgbuf)
40+
return;
41+
42+
vunmap(dmab->area);
43+
dmab->area = NULL;
44+
45+
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
46+
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
47+
tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
48+
tmpb.dev.dev = sgbuf->dev;
49+
for (i = 0; i < sgbuf->pages; i++) {
50+
if (!(sgbuf->table[i].addr & ~PAGE_MASK))
51+
continue; /* continuous pages */
52+
tmpb.area = sgbuf->table[i].buf;
53+
tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
54+
tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
55+
snd_dma_free_pages(&tmpb);
56+
}
57+
58+
kfree(sgbuf->table);
59+
kfree(sgbuf->page_table);
60+
kfree(sgbuf);
61+
dmab->private_data = NULL;
62+
}
63+
64+
#define MAX_ALLOC_PAGES 32
65+
66+
static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
67+
{
68+
struct snd_sg_buf *sgbuf;
69+
unsigned int i, pages, chunk, maxpages;
70+
struct snd_dma_buffer tmpb;
71+
struct snd_sg_page *table;
72+
struct page **pgtable;
73+
int type = SNDRV_DMA_TYPE_DEV;
74+
pgprot_t prot = PAGE_KERNEL;
75+
void *area;
76+
77+
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
78+
if (!sgbuf)
79+
return NULL;
80+
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
81+
type = SNDRV_DMA_TYPE_DEV_WC;
82+
#ifdef pgprot_noncached
83+
prot = pgprot_noncached(PAGE_KERNEL);
84+
#endif
85+
}
86+
sgbuf->dev = dmab->dev.dev;
87+
pages = snd_sgbuf_aligned_pages(size);
88+
sgbuf->tblsize = sgbuf_align_table(pages);
89+
table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
90+
if (!table)
91+
goto _failed;
92+
sgbuf->table = table;
93+
pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
94+
if (!pgtable)
95+
goto _failed;
96+
sgbuf->page_table = pgtable;
97+
98+
/* allocate pages */
99+
maxpages = MAX_ALLOC_PAGES;
100+
while (pages > 0) {
101+
chunk = pages;
102+
/* don't be too eager to take a huge chunk */
103+
if (chunk > maxpages)
104+
chunk = maxpages;
105+
chunk <<= PAGE_SHIFT;
106+
if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
107+
chunk, &tmpb) < 0) {
108+
if (!sgbuf->pages)
109+
goto _failed;
110+
size = sgbuf->pages * PAGE_SIZE;
111+
break;
112+
}
113+
chunk = tmpb.bytes >> PAGE_SHIFT;
114+
for (i = 0; i < chunk; i++) {
115+
table->buf = tmpb.area;
116+
table->addr = tmpb.addr;
117+
if (!i)
118+
table->addr |= chunk; /* mark head */
119+
table++;
120+
*pgtable++ = virt_to_page(tmpb.area);
121+
tmpb.area += PAGE_SIZE;
122+
tmpb.addr += PAGE_SIZE;
123+
}
124+
sgbuf->pages += chunk;
125+
pages -= chunk;
126+
if (chunk < maxpages)
127+
maxpages = chunk;
128+
}
129+
130+
sgbuf->size = size;
131+
area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
132+
if (!area)
133+
goto _failed;
134+
return area;
135+
136+
_failed:
137+
snd_dma_sg_free(dmab); /* free the table */
138+
return NULL;
139+
}
140+
141+
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
142+
size_t offset)
143+
{
144+
struct snd_sg_buf *sgbuf = dmab->private_data;
145+
dma_addr_t addr;
146+
147+
addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
148+
addr &= ~((dma_addr_t)PAGE_SIZE - 1);
149+
return addr + offset % PAGE_SIZE;
150+
}
151+
152+
static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
153+
size_t offset)
154+
{
155+
struct snd_sg_buf *sgbuf = dmab->private_data;
156+
unsigned int idx = offset >> PAGE_SHIFT;
157+
158+
if (idx >= (unsigned int)sgbuf->pages)
159+
return NULL;
160+
return sgbuf->page_table[idx];
161+
}
162+
163+
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
164+
unsigned int ofs,
165+
unsigned int size)
166+
{
167+
struct snd_sg_buf *sg = dmab->private_data;
168+
unsigned int start, end, pg;
169+
170+
start = ofs >> PAGE_SHIFT;
171+
end = (ofs + size - 1) >> PAGE_SHIFT;
172+
/* check page continuity */
173+
pg = sg->table[start].addr >> PAGE_SHIFT;
174+
for (;;) {
175+
start++;
176+
if (start > end)
177+
break;
178+
pg++;
179+
if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
180+
return (start << PAGE_SHIFT) - ofs;
181+
}
182+
/* ok, all on continuous pages */
183+
return size;
184+
}
185+
186+
static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
187+
struct vm_area_struct *area)
188+
{
189+
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
190+
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
191+
return -ENOENT; /* continue with the default mmap handler */
192+
}
193+
194+
const struct snd_malloc_ops snd_dma_sg_ops = {
195+
.alloc = snd_dma_sg_alloc,
196+
.free = snd_dma_sg_free,
197+
.get_addr = snd_dma_sg_get_addr,
198+
.get_page = snd_dma_sg_get_page,
199+
.get_chunk_size = snd_dma_sg_get_chunk_size,
200+
.mmap = snd_dma_sg_mmap,
201+
};

0 commit comments

Comments
 (0)