@@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
 
 static inline dma_addr_t grant_to_dma(grant_ref_t grant)
 {
-	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
 }
 
 static inline grant_ref_t dma_to_grant(dma_addr_t dma)
 {
-	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
 }
 
 static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
@@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 				 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	unsigned long pfn;
 	grant_ref_t grant;
 	void *ret;
@@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 	if (unlikely(data->broken))
 		return NULL;
 
-	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
 	if (!ret)
 		return NULL;
 
 	pfn = virt_to_pfn(ret);
 
 	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
-		free_pages_exact(ret, n_pages * PAGE_SIZE);
+		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
 		return NULL;
 	}
 
@@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 			       dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	grant_ref_t grant;
 
 	data = find_xen_grant_dma_data(dev);
@@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 
 	gnttab_free_grant_reference_seq(grant, n_pages);
 
-	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
 }
 
 static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
@@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 					 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(offset + size);
+	unsigned long dma_offset = xen_offset_in_page(offset),
+		      pfn_offset = XEN_PFN_DOWN(offset);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 	dma_addr_t dma_handle;
 
@@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 
 	for (i = 0; i < n_pages; i++) {
 		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
-				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+				dir == DMA_TO_DEVICE);
 	}
 
-	dma_handle = grant_to_dma(grant) + offset;
+	dma_handle = grant_to_dma(grant) + dma_offset;
 
 	return dma_handle;
 }
@@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
-	unsigned int i, n_pages = PFN_UP(offset + size);
+	unsigned long dma_offset = xen_offset_in_page(dma_handle);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 
 	if (WARN_ON(dir == DMA_NONE))
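For readers unfamiliar with the grant DMA address scheme, here is a minimal userspace sketch (not part of the commit) of the grant <-> DMA address encoding that the first hunk fixes. The constants mirror the driver's definitions: XEN_GRANT_DMA_ADDR_OFF is the top bit that marks a grant-backed DMA address, and grants are always 4 KiB (XEN_PAGE_SHIFT == 12) even when the kernel is built with larger pages, which is why shifting by PAGE_SHIFT produced addresses at the wrong granularity.

/*
 * Minimal sketch (not from the commit) of the grant <-> DMA address encoding.
 * XEN_GRANT_DMA_ADDR_OFF and XEN_PAGE_SHIFT mirror the driver's definitions;
 * everything else here is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)	/* top bit marks a grant-backed DMA address */
#define XEN_PAGE_SHIFT		12		/* grants are always 4 KiB */

typedef uint32_t grant_ref_t;
typedef uint64_t dma_addr_t;

static dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}

static grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}

int main(void)
{
	grant_ref_t grant = 42;
	/* A sub-page offset added by the caller does not disturb the grant bits. */
	dma_addr_t dma = grant_to_dma(grant) + 0x80;

	printf("grant %u -> dma %#llx -> grant %u\n",
	       grant, (unsigned long long)dma, dma_to_grant(dma));
	return 0;
}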
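Similarly, a small illustrative example (again not from the commit) of the grant-count calculation introduced in the map/unmap hunks. The macros below mirror the kernel's xen_offset_in_page()/XEN_PFN_DOWN()/XEN_PFN_UP() helpers for the fixed 4 KiB Xen page size, and the offset and size values are made up: once the offset is reduced modulo XEN_PAGE_SIZE, n_pages covers dma_offset + size rather than offset + size, and pfn_offset selects the first 4 KiB frame inside the (possibly larger) kernel page.

/*
 * Illustrative sketch of the grant-count calculation, assuming 4 KiB Xen pages.
 * The macros mirror the kernel helpers of the same name; values are made up.
 */
#include <stddef.h>
#include <stdio.h>

#define XEN_PAGE_SIZE		4096UL
#define xen_offset_in_page(p)	((unsigned long)(p) & (XEN_PAGE_SIZE - 1))
#define XEN_PFN_DOWN(x)		((unsigned long)(x) / XEN_PAGE_SIZE)
#define XEN_PFN_UP(x)		(((unsigned long)(x) + XEN_PAGE_SIZE - 1) / XEN_PAGE_SIZE)

int main(void)
{
	/* Offset within a (possibly 64 KiB) kernel page, plus a buffer size. */
	unsigned long offset = 0x1f00;
	size_t size = 0x300;

	unsigned long dma_offset = xen_offset_in_page(offset);	/* 0xf00 */
	unsigned long pfn_offset = XEN_PFN_DOWN(offset);	/* skip one 4 KiB frame */
	unsigned int n_pages = XEN_PFN_UP(dma_offset + size);	/* 2 grants needed */

	printf("dma_offset=%#lx pfn_offset=%lu n_pages=%u\n",
	       dma_offset, pfn_offset, n_pages);
	return 0;
}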