Commit 92826e9

Author and committer: Christoph Hellwig
dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
When dma_direct_alloc_pages encounters a highmem page it currently just
gives up. What it should do instead is fall back to allocating from the
page allocator - without this, platforms with a global highmem CMA pool
will fail all dma_alloc_pages allocations.

Fixes: efa70f2 ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Parent: 566fb90
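
For orientation, a simplified sketch of the patched helper (not the
verbatim kernel code: the swiotlb path, the GFP_DMA32/GFP_DMA retry
loop, and the alignment checks are omitted) shows what the new
allow_highmem parameter does:

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, bool allow_highmem)
{
        struct page *page;

        /* Try the per-device or global CMA area first. */
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page) {
                /*
                 * A CMA page the caller cannot use - outside the DMA mask,
                 * or highmem when the caller needs page_address() - is
                 * dropped here instead of causing an outright failure.
                 */
                if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
                    (!allow_highmem && PageHighMem(page))) {
                        dma_free_contiguous(dev, page, size);
                        page = NULL;
                }
        }

        /* Fall back to the plain page allocator. */
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp,
                                        get_order(size));
        return page;
}

dma_direct_alloc_pages() now passes allow_highmem=false, so a global
highmem CMA pool no longer fails every dma_alloc_pages() call; the other
two callers, which can remap or tolerate highmem, pass true.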

kernel/dma/direct.c

Lines changed: 10 additions, 17 deletions
@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-                gfp_t gfp)
+                gfp_t gfp, bool allow_highmem)
 {
         int node = dev_to_node(dev);
         struct page *page = NULL;
@@ -129,9 +129,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
         gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                            &phys_limit);
         page = dma_alloc_contiguous(dev, size, gfp);
-        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                dma_free_contiguous(dev, page, size);
-                page = NULL;
+        if (page) {
+                if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+                    (!allow_highmem && PageHighMem(page))) {
+                        dma_free_contiguous(dev, page, size);
+                        page = NULL;
+                }
         }
 again:
         if (!page)
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
         struct page *page;
 
-        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
         if (!page)
                 return NULL;
 
@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
         /* we always manually zero the memory once we are done */
-        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
         if (!page)
                 return NULL;
 
@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
         if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-        page = __dma_direct_alloc_pages(dev, size, gfp);
+        page = __dma_direct_alloc_pages(dev, size, gfp, false);
         if (!page)
                 return NULL;
-        if (PageHighMem(page)) {
-                /*
-                 * Depending on the cma= arguments and per-arch setup
-                 * dma_alloc_contiguous could return highmem pages.
-                 * Without remapping there is no way to return them here,
-                 * so log an error and fail.
-                 */
-                dev_info(dev, "Rejecting highmem page from CMA.\n");
-                goto out_free_pages;
-        }
 
         ret = page_address(page);
         if (dma_set_decrypted(dev, ret, size))
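
As a point of reference, here is a hypothetical caller (the device, buffer
size, and function name are made up for illustration). It shows why
dma_direct_alloc_pages() must not return highmem: the API hands back a
page that the core, and typically the driver, addresses through
page_address():

#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/string.h>

static int example_alloc_buffer(struct device *dev)
{
        dma_addr_t dma;
        struct page *page;

        /* Non-coherent allocation through the dma_alloc_pages API. */
        page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_BIDIRECTIONAL,
                               GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        /*
         * page_address() only works for lowmem pages, which is why the
         * helper is called with allow_highmem=false on this path.
         */
        memset(page_address(page), 0, SZ_64K);

        /* ... hand `dma` to the device and start the transfer ... */

        dma_free_pages(dev, SZ_64K, page, dma, DMA_BIDIRECTIONAL);
        return 0;
}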
