@@ -51,11 +51,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
         struct scatterlist *sg;
         unsigned int i;
 
-        if (umem->nmap > 0)
-                ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
-                                DMA_BIDIRECTIONAL);
+        if (dirty)
+                ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
+                                           DMA_BIDIRECTIONAL, 0);
 
-        for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
+        for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
                 unpin_user_page_range_dirty_lock(sg_page(sg),
                         DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
 
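
The converted helpers take the whole sg_table, so the caller no longer passes an element count: for_each_sgtable_sg() iterates the sgt->orig_nents CPU entries itself, and ib_dma_unmap_sgtable_attrs() reads the DMA-mapped count from sgt->nents. A minimal sketch of the same unpin loop over a standalone table (the wrapper function name is hypothetical; the scatterlist helpers are the real <linux/scatterlist.h> API):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper mirroring the loop in __ib_umem_release():
     * walk every CPU-side entry of a table and unpin its page range.
     */
    static void release_table_pages(struct sg_table *sgt, bool make_dirty)
    {
            struct scatterlist *sg;
            unsigned int i;

            /* for_each_sgtable_sg() walks sgt->orig_nents CPU entries,
             * not the (possibly coalesced) DMA view in sgt->nents.
             */
            for_each_sgtable_sg(sgt, sg, i)
                    unpin_user_page_range_dirty_lock(sg_page(sg),
                            DIV_ROUND_UP(sg->length, PAGE_SIZE),
                            make_dirty);
    }
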
@@ -111,7 +111,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
         /* offset into first SGL */
         pgoff = umem->address & ~PAGE_MASK;
 
-        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+        for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
                 /* Walk SGL and reduce max page size if VA/PA bits differ
                  * for any address.
                  */
@@ -121,7 +121,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                  * the maximum possible page size as the low bits of the iova
                  * must be zero when starting the next chunk.
                  */
-                if (i != (umem->nmap - 1))
+                if (i != (umem->sgt_append.sgt.nents - 1))
                         mask |= va;
                 pgoff = 0;
         }
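
Note the asymmetry the sg_table API makes explicit: for_each_sgtable_dma_sg() walks the DMA-mapped view (sgt->nents entries, read via sg_dma_address()/sg_dma_len()), which is why the last-entry check above becomes sgt.nents - 1 rather than the removed umem->nmap. A simplified sketch of the same accumulation, assuming an already-mapped table (the function name is hypothetical and the mask logic is reduced from what ib_umem_find_best_pgsz() actually computes):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper: OR together the bits that constrain the
     * supportable page size, as ib_umem_find_best_pgsz() does, but
     * over raw DMA addresses instead of iova-adjusted ones.
     */
    static unsigned long dma_alignment_mask(struct sg_table *sgt)
    {
            struct scatterlist *sg;
            unsigned long mask = 0;
            unsigned int i;

            for_each_sgtable_dma_sg(sgt, sg, i) {
                    mask |= sg_dma_address(sg);
                    /* Every segment end except the last must also be
                     * aligned to the chosen page size.
                     */
                    if (i != sgt->nents - 1)
                            mask |= sg_dma_address(sg) + sg_dma_len(sg);
            }
            return mask;
    }
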
@@ -231,30 +231,19 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                                 &umem->sgt_append, page_list, pinned, 0,
                                 pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
                                 npages, GFP_KERNEL);
-                umem->sg_nents = umem->sgt_append.sgt.nents;
                 if (ret) {
-                        memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-                               sizeof(umem->sgt_append.sgt));
                         unpin_user_pages_dirty_lock(page_list, pinned, 0);
                         goto umem_release;
                 }
         }
 
-        memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-               sizeof(umem->sgt_append.sgt));
         if (access & IB_ACCESS_RELAXED_ORDERING)
                 dma_attr |= DMA_ATTR_WEAK_ORDERING;
 
-        umem->nmap =
-                ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
-                                    DMA_BIDIRECTIONAL, dma_attr);
-
-        if (!umem->nmap) {
-                ret = -ENOMEM;
+        ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
+                                       DMA_BIDIRECTIONAL, dma_attr);
+        if (ret)
                 goto umem_release;
-        }
-
-        ret = 0;
         goto out;
 
 umem_release:
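
ib_dma_map_sgtable_attrs() follows the dma_map_sgtable() convention: it returns 0 or a negative errno and stores the mapped entry count in sgt->nents as a side effect. That is what lets the open-coded umem->nmap/sg_nents bookkeeping and the memcpy() of the table head disappear above. A minimal sketch of the same error-handling shape against the generic DMA API (the wrapper is hypothetical; dma_map_sgtable() is the real <linux/dma-mapping.h> call):

    #include <linux/dma-mapping.h>
    #include <linux/printk.h>
    #include <linux/scatterlist.h>

    /* Hypothetical wrapper showing the 0/-errno convention that
     * replaced the old "returned count, 0 on failure" style.
     */
    static int map_table(struct device *dev, struct sg_table *sgt,
                         unsigned long dma_attr)
    {
            int ret;

            ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, dma_attr);
            if (ret)        /* e.g. -ENOMEM; sgt is left unmapped */
                    return ret;

            /* On success the mapped count lives in sgt->nents. */
            pr_debug("mapped %u DMA segments\n", sgt->nents);
            return 0;
    }
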
@@ -314,7 +303,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                 return -EINVAL;
         }
 
-        ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
+        ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
+                                 umem->sgt_append.sgt.orig_nents, dst, length,
                                  offset + ib_umem_offset(umem));
 
         if (ret < 0)
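
sg_pcopy_to_buffer() copies through the CPU mapping, so it is handed the CPU-side count, sgt.orig_nents, not the DMA-side nents. A sketch of the equivalent call against a bare table (the helper name and error policy are assumptions; sg_pcopy_to_buffer() itself returns the number of bytes copied):

    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper: copy 'length' bytes at 'offset' out of a
     * pinned table, as ib_umem_copy_from() now does for the umem.
     */
    static int copy_from_table(struct sg_table *sgt, void *dst,
                               size_t offset, size_t length)
    {
            size_t copied;

            copied = sg_pcopy_to_buffer(sgt->sgl, sgt->orig_nents,
                                        dst, length, offset);
            return copied == length ? 0 : -EINVAL;
    }
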