Skip to content

Commit f05baa0

Browse files
committed
Merge tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - fix dma coherent mmap in nommu (me)

 - more AMD SEV fallout (David Rientjes, me)

 - fix alignment in dma_common_*_remap (Eric Auger)

* tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-remap: align the size in dma_common_*_remap()
  dma-mapping: DMA_COHERENT_POOL should select GENERIC_ALLOCATOR
  dma-direct: add missing set_memory_decrypted() for coherent mapping
  dma-direct: check return value when encrypting or decrypting memory
  dma-direct: re-encrypt memory if dma_direct_alloc_pages() fails
  dma-direct: always align allocation size in dma_direct_alloc_pages()
  dma-direct: mark __dma_direct_alloc_pages static
  dma-direct: re-enable mmap for !CONFIG_MMU
2 parents 4e99b32 + 8e36baf commit f05baa0

File tree

4 files changed

+39
-30
lines changed

4 files changed

+39
-30
lines changed

include/linux/dma-direct.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
7777
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
7878
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
7979
dma_addr_t dma_addr, unsigned long attrs);
80-
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
81-
gfp_t gfp, unsigned long attrs);
8280
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
8381
void *cpu_addr, dma_addr_t dma_addr, size_t size,
8482
unsigned long attrs);

kernel/dma/Kconfig

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -71,15 +71,16 @@ config SWIOTLB
7171
# in the pagetables
7272
#
7373
config DMA_NONCOHERENT_MMAP
74+
default y if !MMU
7475
bool
7576

7677
config DMA_COHERENT_POOL
78+
select GENERIC_ALLOCATOR
7779
bool
7880

7981
config DMA_REMAP
8082
bool
8183
depends on MMU
82-
select GENERIC_ALLOCATOR
8384
select DMA_NONCOHERENT_MMAP
8485

8586
config DMA_DIRECT_REMAP

kernel/dma/direct.c

Lines changed: 34 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -109,29 +109,30 @@ static inline bool dma_should_free_from_pool(struct device *dev,
109109
return false;
110110
}
111111

112-
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
112+
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
113113
gfp_t gfp, unsigned long attrs)
114114
{
115-
size_t alloc_size = PAGE_ALIGN(size);
116115
int node = dev_to_node(dev);
117116
struct page *page = NULL;
118117
u64 phys_limit;
119118

119+
WARN_ON_ONCE(!PAGE_ALIGNED(size));
120+
120121
if (attrs & DMA_ATTR_NO_WARN)
121122
gfp |= __GFP_NOWARN;
122123

123124
/* we always manually zero the memory once we are done: */
124125
gfp &= ~__GFP_ZERO;
125126
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
126127
&phys_limit);
127-
page = dma_alloc_contiguous(dev, alloc_size, gfp);
128+
page = dma_alloc_contiguous(dev, size, gfp);
128129
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
129-
dma_free_contiguous(dev, page, alloc_size);
130+
dma_free_contiguous(dev, page, size);
130131
page = NULL;
131132
}
132133
again:
133134
if (!page)
134-
page = alloc_pages_node(node, gfp, get_order(alloc_size));
135+
page = alloc_pages_node(node, gfp, get_order(size));
135136
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
136137
dma_free_contiguous(dev, page, size);
137138
page = NULL;
@@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
157158
{
158159
struct page *page;
159160
void *ret;
161+
int err;
162+
163+
size = PAGE_ALIGN(size);
160164

161165
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
162-
ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
166+
ret = dma_alloc_from_pool(dev, size, &page, gfp);
163167
if (!ret)
164168
return NULL;
165169
goto done;
@@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
183187
dma_alloc_need_uncached(dev, attrs)) ||
184188
(IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
185189
/* remove any dirty cache lines on the kernel alias */
186-
arch_dma_prep_coherent(page, PAGE_ALIGN(size));
190+
arch_dma_prep_coherent(page, size);
187191

188192
/* create a coherent mapping */
189-
ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
193+
ret = dma_common_contiguous_remap(page, size,
190194
dma_pgprot(dev, PAGE_KERNEL, attrs),
191195
__builtin_return_address(0));
192196
if (!ret)
193197
goto out_free_pages;
198+
if (force_dma_unencrypted(dev)) {
199+
err = set_memory_decrypted((unsigned long)ret,
200+
1 << get_order(size));
201+
if (err)
202+
goto out_free_pages;
203+
}
194204
memset(ret, 0, size);
195205
goto done;
196206
}
@@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
207217
}
208218

209219
ret = page_address(page);
210-
if (force_dma_unencrypted(dev))
211-
set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
220+
if (force_dma_unencrypted(dev)) {
221+
err = set_memory_decrypted((unsigned long)ret,
222+
1 << get_order(size));
223+
if (err)
224+
goto out_free_pages;
225+
}
212226

213227
memset(ret, 0, size);
214228

@@ -217,14 +231,23 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
217231
arch_dma_prep_coherent(page, size);
218232
ret = arch_dma_set_uncached(ret, size);
219233
if (IS_ERR(ret))
220-
goto out_free_pages;
234+
goto out_encrypt_pages;
221235
}
222236
done:
223237
if (force_dma_unencrypted(dev))
224238
*dma_handle = __phys_to_dma(dev, page_to_phys(page));
225239
else
226240
*dma_handle = phys_to_dma(dev, page_to_phys(page));
227241
return ret;
242+
243+
out_encrypt_pages:
244+
if (force_dma_unencrypted(dev)) {
245+
err = set_memory_encrypted((unsigned long)page_address(page),
246+
1 << get_order(size));
247+
/* If memory cannot be re-encrypted, it must be leaked */
248+
if (err)
249+
return NULL;
250+
}
228251
out_free_pages:
229252
dma_free_contiguous(dev, page, size);
230253
return NULL;
@@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
459482
return ret;
460483
}
461484

462-
#ifdef CONFIG_MMU
463485
bool dma_direct_can_mmap(struct device *dev)
464486
{
465487
return dev_is_dma_coherent(dev) ||
@@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
485507
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
486508
user_count << PAGE_SHIFT, vma->vm_page_prot);
487509
}
488-
#else /* CONFIG_MMU */
489-
bool dma_direct_can_mmap(struct device *dev)
490-
{
491-
return false;
492-
}
493-
494-
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
495-
void *cpu_addr, dma_addr_t dma_addr, size_t size,
496-
unsigned long attrs)
497-
{
498-
return -ENXIO;
499-
}
500-
#endif /* CONFIG_MMU */
501510

502511
int dma_direct_supported(struct device *dev, u64 mask)
503512
{

kernel/dma/remap.c

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
2424
{
2525
void *vaddr;
2626

27-
vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
27+
vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
28+
VM_DMA_COHERENT, prot);
2829
if (vaddr)
2930
find_vm_area(vaddr)->pages = pages;
3031
return vaddr;
@@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
3738
void *dma_common_contiguous_remap(struct page *page, size_t size,
3839
pgprot_t prot, const void *caller)
3940
{
40-
int count = size >> PAGE_SHIFT;
41+
int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
4142
struct page **pages;
4243
void *vaddr;
4344
int i;

0 commit comments

Comments
 (0)