@@ -11,8 +11,6 @@
  * Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
  *
  * DMA mapping callbacks...
- * As alloc_coherent is the only DMA callback being used currently, that's
- * the only thing implemented properly. The rest need looking into...
  */
 
 #include <linux/dma-noncoherent.h>
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
 	.pte_entry = page_clear_nocache,
 };
 
-/*
- * Alloc "coherent" memory, which for OpenRISC means simply uncached.
- *
- * This function effectively just calls __get_free_pages, sets the
- * cache-inhibit bit on those pages, and makes sure that the pages are
- * flushed out of the cache before they are used.
- *
- * If the NON_CONSISTENT attribute is set, then this function just
- * returns "normal", cachable memory.
- *
- * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
- * into consideration here, too. All current known implementations of
- * the OR1K support only strongly ordered memory accesses, so that flag
- * is being ignored for now; uncached but write-combined memory is a
- * missing feature of the OR1K.
- */
-void *
-arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va;
-	void *page;
-
-	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
-	if (!page)
-		return NULL;
-
-	/* This gives us the real physical address of the first page. */
-	*dma_handle = __pa(page);
-
-	va = (unsigned long)page;
+	unsigned long va = (unsigned long)cpu_addr;
+	int error;
 
 	/*
 	 * We need to iterate through the pages, clearing the dcache for
 	 * them and setting the cache-inhibit bit.
 	 */
-	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
-			NULL)) {
-		free_pages_exact(page, size);
-		return NULL;
-	}
-
-	return (void *)va;
+	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+			NULL);
+	if (error)
+		return ERR_PTR(error);
+	return cpu_addr;
 }
 
-void
-arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va = (unsigned long)vaddr;
+	unsigned long va = (unsigned long)cpu_addr;
 
 	/* walk_page_range shouldn't be able to fail here */
 	WARN_ON(walk_page_range(&init_mm, va, va + size,
 			&clear_nocache_walk_ops, NULL));
-
-	free_pages_exact(vaddr, size);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
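
For context, set_nocache_walk_ops and clear_nocache_walk_ops pair walk_page_range() with pte_entry callbacks defined earlier in this file, outside the hunks shown. The sketch below illustrates what the set-side callback does: set the OpenRISC cache-inhibit bit (_PAGE_CI) in the PTE, flush the stale TLB entry, and write the page back out of the data cache. It follows the shape of the in-tree page_set_nocache() but is an illustration, not a verbatim copy:

static int page_set_nocache(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Mark the page uncached via the cache-inhibit PTE bit. */
	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so the new flags get picked up
	 * on the next access.
	 */
	flush_tlb_page(NULL, addr);

	/* Write the page back out of the dcache, one line at a time. */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}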
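
On the caller side, the page allocation that arch_dma_alloc() used to do itself now lives in common code, which invokes the two new hooks around it. Note that arch_dma_set_uncached() reports failure with ERR_PTR() rather than NULL, so the caller must test with IS_ERR(). Below is a minimal sketch of that division of labor, using hypothetical wrapper names and ignoring details such as arch_dma_prep_coherent() and device addressing limits (the real consumer is the generic allocator in kernel/dma/direct.c):

#include <linux/dma-noncoherent.h>
#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical caller: allocate zeroed pages, then retag them uncached
 * in place via the new arch hook. */
static void *dma_alloc_uncached_sketch(size_t size, dma_addr_t *dma_handle,
				       gfp_t gfp)
{
	void *addr = alloc_pages_exact(size, gfp | __GFP_ZERO);
	void *uncached;

	if (!addr)
		return NULL;

	/* The physical address doubles as the bus address here (no IOMMU). */
	*dma_handle = __pa(addr);

	uncached = arch_dma_set_uncached(addr, size);
	if (IS_ERR(uncached)) {
		free_pages_exact(addr, size);
		return NULL;
	}
	return uncached;
}

/* Hypothetical counterpart: restore cacheability, then free the pages. */
static void dma_free_uncached_sketch(void *cpu_addr, size_t size)
{
	/*
	 * On OpenRISC the mapping is retagged in place, so cpu_addr is
	 * the same pointer that was originally allocated.
	 */
	arch_dma_clear_uncached(cpu_addr, size);
	free_pages_exact(cpu_addr, size);
}

The upshot of the patch is that the allocation and free boilerplate moves into common code, while the architecture keeps only the two cache-attribute hooks.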