Skip to content

Commit a4a4d11

Browse files
author
Christoph Hellwig
committed
openrisc: use the generic in-place uncached DMA allocator
Switch openrisc to use the dma-direct allocator and just provide the hooks for setting memory uncached or cached.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Stafford Horne <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
1 parent 999a5d1 commit a4a4d11

File tree

2 files changed

+12
-45
lines changed

2 files changed

+12
-45
lines changed

arch/openrisc/Kconfig

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
config OPENRISC
88
def_bool y
99
select ARCH_32BIT_OFF_T
10+
select ARCH_HAS_DMA_SET_UNCACHED
11+
select ARCH_HAS_DMA_CLEAR_UNCACHED
1012
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
1113
select OF
1214
select OF_EARLY_FLATTREE

arch/openrisc/kernel/dma.c

Lines changed: 10 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,6 @@
1111
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
1212
*
1313
* DMA mapping callbacks...
14-
* As alloc_coherent is the only DMA callback being used currently, that's
15-
* the only thing implemented properly. The rest need looking into...
1614
*/
1715

1816
#include <linux/dma-noncoherent.h>
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
6765
.pte_entry = page_clear_nocache,
6866
};
6967

70-
/*
71-
* Alloc "coherent" memory, which for OpenRISC means simply uncached.
72-
*
73-
* This function effectively just calls __get_free_pages, sets the
74-
* cache-inhibit bit on those pages, and makes sure that the pages are
75-
* flushed out of the cache before they are used.
76-
*
77-
* If the NON_CONSISTENT attribute is set, then this function just
78-
* returns "normal", cachable memory.
79-
*
80-
* There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
81-
* into consideration here, too. All current known implementations of
82-
* the OR1K support only strongly ordered memory accesses, so that flag
83-
* is being ignored for now; uncached but write-combined memory is a
84-
* missing feature of the OR1K.
85-
*/
86-
void *
87-
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
88-
gfp_t gfp, unsigned long attrs)
68+
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
8969
{
90-
unsigned long va;
91-
void *page;
92-
93-
page = alloc_pages_exact(size, gfp | __GFP_ZERO);
94-
if (!page)
95-
return NULL;
96-
97-
/* This gives us the real physical address of the first page. */
98-
*dma_handle = __pa(page);
99-
100-
va = (unsigned long)page;
70+
unsigned long va = (unsigned long)cpu_addr;
71+
int error;
10172

10273
/*
10374
* We need to iterate through the pages, clearing the dcache for
10475
* them and setting the cache-inhibit bit.
10576
*/
106-
if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
107-
NULL)) {
108-
free_pages_exact(page, size);
109-
return NULL;
110-
}
111-
112-
return (void *)va;
77+
error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
78+
NULL);
79+
if (error)
80+
return ERR_PTR(error);
81+
return cpu_addr;
11382
}
11483

115-
void
116-
arch_dma_free(struct device *dev, size_t size, void *vaddr,
117-
dma_addr_t dma_handle, unsigned long attrs)
84+
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
11885
{
119-
unsigned long va = (unsigned long)vaddr;
86+
unsigned long va = (unsigned long)cpu_addr;
12087

12188
/* walk_page_range shouldn't be able to fail here */
12289
WARN_ON(walk_page_range(&init_mm, va, va + size,
12390
&clear_nocache_walk_ops, NULL));
124-
125-
free_pages_exact(vaddr, size);
12691
}
12792

12893
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,

0 commit comments

Comments (0)