Skip to content

Commit 633d5fc

Browse files
David Rientjes authored and Christoph Hellwig committed
dma-direct: always align allocation size in dma_direct_alloc_pages()
dma_alloc_contiguous() does size >> PAGE_SHIFT and set_memory_decrypted() works at page granularity. It's necessary to page align the allocation size in dma_direct_alloc_pages() for consistent behavior. This also fixes an issue when arch_dma_prep_coherent() is called on an unaligned allocation size for dma_alloc_need_uncached() when CONFIG_DMA_DIRECT_REMAP is disabled but CONFIG_ARCH_HAS_DMA_SET_UNCACHED is enabled. Signed-off-by: David Rientjes <[email protected]> Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 26749b3 commit 633d5fc

File tree

1 file changed

+10
-7
lines changed

1 file changed

+10
-7
lines changed

kernel/dma/direct.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -112,26 +112,27 @@ static inline bool dma_should_free_from_pool(struct device *dev,
112112
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
113113
gfp_t gfp, unsigned long attrs)
114114
{
115-
size_t alloc_size = PAGE_ALIGN(size);
116115
int node = dev_to_node(dev);
117116
struct page *page = NULL;
118117
u64 phys_limit;
119118

119+
WARN_ON_ONCE(!PAGE_ALIGNED(size));
120+
120121
if (attrs & DMA_ATTR_NO_WARN)
121122
gfp |= __GFP_NOWARN;
122123

123124
/* we always manually zero the memory once we are done: */
124125
gfp &= ~__GFP_ZERO;
125126
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
126127
&phys_limit);
127-
page = dma_alloc_contiguous(dev, alloc_size, gfp);
128+
page = dma_alloc_contiguous(dev, size, gfp);
128129
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
129-
dma_free_contiguous(dev, page, alloc_size);
130+
dma_free_contiguous(dev, page, size);
130131
page = NULL;
131132
}
132133
again:
133134
if (!page)
134-
page = alloc_pages_node(node, gfp, get_order(alloc_size));
135+
page = alloc_pages_node(node, gfp, get_order(size));
135136
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
136137
dma_free_contiguous(dev, page, size);
137138
page = NULL;
@@ -158,8 +159,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
158159
struct page *page;
159160
void *ret;
160161

162+
size = PAGE_ALIGN(size);
163+
161164
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
162-
ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
165+
ret = dma_alloc_from_pool(dev, size, &page, gfp);
163166
if (!ret)
164167
return NULL;
165168
goto done;
@@ -183,10 +186,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
183186
dma_alloc_need_uncached(dev, attrs)) ||
184187
(IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
185188
/* remove any dirty cache lines on the kernel alias */
186-
arch_dma_prep_coherent(page, PAGE_ALIGN(size));
189+
arch_dma_prep_coherent(page, size);
187190

188191
/* create a coherent mapping */
189-
ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
192+
ret = dma_common_contiguous_remap(page, size,
190193
dma_pgprot(dev, PAGE_KERNEL, attrs),
191194
__builtin_return_address(0));
192195
if (!ret)

0 commit comments

Comments (0)