@@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
 
 static inline dma_addr_t grant_to_dma(grant_ref_t grant)
 {
-	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
 }
 
 static inline grant_ref_t dma_to_grant(dma_addr_t dma)
 {
-	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
 }
 
 static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
@@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 				 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	unsigned long pfn;
 	grant_ref_t grant;
 	void *ret;
@@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 	if (unlikely(data->broken))
 		return NULL;
 
-	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
 	if (!ret)
 		return NULL;
 
 	pfn = virt_to_pfn(ret);
 
 	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
-		free_pages_exact(ret, n_pages * PAGE_SIZE);
+		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
 		return NULL;
 	}
 
@@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 			       dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	grant_ref_t grant;
 
 	data = find_xen_grant_dma_data(dev);
@@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 
 	gnttab_free_grant_reference_seq(grant, n_pages);
 
-	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
 }
 
 static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
@@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 					 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(offset + size);
+	unsigned long dma_offset = xen_offset_in_page(offset),
+		      pfn_offset = XEN_PFN_DOWN(offset);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 	dma_addr_t dma_handle;
 
174176
@@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 
 	for (i = 0; i < n_pages; i++) {
 		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
-				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+				dir == DMA_TO_DEVICE);
 	}
 
-	dma_handle = grant_to_dma(grant) + offset;
+	dma_handle = grant_to_dma(grant) + dma_offset;
 
 	return dma_handle;
 }
@@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
-	unsigned int i, n_pages = PFN_UP(offset + size);
+	unsigned long dma_offset = xen_offset_in_page(dma_handle);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 
 	if (WARN_ON(dir == DMA_NONE))