 
 #include "uverbs.h"
 
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
-                                   const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
+{
+        umem_odp->is_implicit_odp = 1;
+        umem_odp->umem.is_odp = 1;
+        mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+                            const struct mmu_interval_notifier_ops *ops)
 {
         struct ib_device *dev = umem_odp->umem.ibdev;
+        size_t page_size = 1UL << umem_odp->page_shift;
+        unsigned long start;
+        unsigned long end;
         int ret;
 
         umem_odp->umem.is_odp = 1;
         mutex_init(&umem_odp->umem_mutex);
 
-        if (!umem_odp->is_implicit_odp) {
-                size_t page_size = 1UL << umem_odp->page_shift;
-                unsigned long start;
-                unsigned long end;
-
-                start = ALIGN_DOWN(umem_odp->umem.address, page_size);
-                if (check_add_overflow(umem_odp->umem.address,
-                                       (unsigned long)umem_odp->umem.length,
-                                       &end))
-                        return -EOVERFLOW;
-                end = ALIGN(end, page_size);
-                if (unlikely(end < page_size))
-                        return -EOVERFLOW;
-
-                ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
-                                        (end - start) >> PAGE_SHIFT,
-                                        1 << umem_odp->page_shift);
-                if (ret)
-                        return ret;
-
-                ret = mmu_interval_notifier_insert(&umem_odp->notifier,
-                                                   umem_odp->umem.owning_mm,
-                                                   start, end - start, ops);
-                if (ret)
-                        goto out_free_map;
-        }
+        start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+        if (check_add_overflow(umem_odp->umem.address,
+                               (unsigned long)umem_odp->umem.length, &end))
+                return -EOVERFLOW;
+        end = ALIGN(end, page_size);
+        if (unlikely(end < page_size))
+                return -EOVERFLOW;
+
+        ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
+                                (end - start) >> PAGE_SHIFT,
+                                1 << umem_odp->page_shift);
+        if (ret)
+                return ret;
+
+        ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+                                           umem_odp->umem.owning_mm, start,
+                                           end - start, ops);
+        if (ret)
+                goto out_free_map;
 
         return 0;
 
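Note on the hunk above: the start/end arithmetic is unchanged from the old if-block, it simply runs unconditionally now that implicit ODP allocation no longer goes through ib_init_umem_odp(). As a reading aid, here is a minimal userspace sketch of that arithmetic. The ODP_ALIGN*() macros and the direct call to __builtin_add_overflow() are simplified stand-ins for the kernel's ALIGN(), ALIGN_DOWN() and check_add_overflow() helpers, and compute_odp_range() is a hypothetical name, not part of the patch.

/*
 * Userspace illustration of the bounds math in ib_init_umem_odp().
 * The macros below are simplified stand-ins for the kernel helpers
 * and assume page_size is a power of two.
 */
#include <stdio.h>
#include <errno.h>

#define ODP_ALIGN_DOWN(x, a)    ((x) & ~((a) - 1))
#define ODP_ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))

static int compute_odp_range(unsigned long address, unsigned long length,
                             unsigned int page_shift,
                             unsigned long *start, unsigned long *end)
{
        unsigned long page_size = 1UL << page_shift;

        *start = ODP_ALIGN_DOWN(address, page_size);
        /* address + length must not wrap around the address space... */
        if (__builtin_add_overflow(address, length, end))
                return -EOVERFLOW;
        *end = ODP_ALIGN(*end, page_size);
        /* ...and neither may the final round-up to a page boundary. */
        if (*end < page_size)
                return -EOVERFLOW;
        return 0;
}

int main(void)
{
        unsigned long start, end;

        if (!compute_odp_range(0x1234, 0x10000, 12, &start, &end))
                printf("start=%#lx end=%#lx pages=%lu\n",
                       start, end, (end - start) >> 12);
        return 0;
}

With the sample inputs this prints start=0x1000 end=0x12000 pages=17; an address plus length that wraps, or an end that wraps during the final round-up, fails with -EOVERFLOW just as in the kernel path.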
@@ -106,7 +109,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 {
         struct ib_umem *umem;
         struct ib_umem_odp *umem_odp;
-        int ret;
 
         if (access & IB_ACCESS_HUGETLB)
                 return ERR_PTR(-EINVAL);
@@ -118,16 +120,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
         umem->ibdev = device;
         umem->writable = ib_access_writable(access);
         umem->owning_mm = current->mm;
-        umem_odp->is_implicit_odp = 1;
         umem_odp->page_shift = PAGE_SHIFT;
 
         umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
-        ret = ib_init_umem_odp(umem_odp, NULL);
-        if (ret) {
-                put_pid(umem_odp->tgid);
-                kfree(umem_odp);
-                return ERR_PTR(ret);
-        }
+        ib_init_umem_implicit_odp(umem_odp);
         return umem_odp;
 }
 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
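The error unwinding disappears here because ib_init_umem_implicit_odp() is void: it only sets is_implicit_odp and umem.is_odp and initializes the mutex, so nothing after get_task_pid() can fail. Assembled from the context and added lines of this hunk (the earlier part of the function is not shown in the diff), the tail of ib_umem_odp_alloc_implicit() now reads:

        umem->ibdev = device;
        umem->writable = ib_access_writable(access);
        umem->owning_mm = current->mm;
        umem_odp->page_shift = PAGE_SHIFT;

        umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        ib_init_umem_implicit_odp(umem_odp);
        return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);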
@@ -248,7 +244,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_umem_odp_get);
 
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
 {
         struct ib_device *dev = umem_odp->umem.ibdev;
 
@@ -258,14 +254,19 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
          * It is the driver's responsibility to ensure, before calling us,
          * that the hardware will not attempt to access the MR any more.
          */
-        if (!umem_odp->is_implicit_odp) {
-                mutex_lock(&umem_odp->umem_mutex);
-                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-                                            ib_umem_end(umem_odp));
-                mutex_unlock(&umem_odp->umem_mutex);
-                mmu_interval_notifier_remove(&umem_odp->notifier);
-                hmm_dma_map_free(dev->dma_device, &umem_odp->map);
-        }
+        mutex_lock(&umem_odp->umem_mutex);
+        ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+                                    ib_umem_end(umem_odp));
+        mutex_unlock(&umem_odp->umem_mutex);
+        mmu_interval_notifier_remove(&umem_odp->notifier);
+        hmm_dma_map_free(dev->dma_device, &umem_odp->map);
+}
+
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+{
+        if (!umem_odp->is_implicit_odp)
+                ib_umem_odp_free(umem_odp);
+
         put_pid(umem_odp->tgid);
         kfree(umem_odp);
 }
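Net effect of the last two hunks: the teardown of an explicit range moves into the new static helper ib_umem_odp_free(), and the exported ib_umem_odp_release() becomes a thin wrapper that skips it for implicit ODP umems, which never mapped pages or registered an interval notifier in the first place. Read straight off the added lines, the wrapper ends up as:

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
        if (!umem_odp->is_implicit_odp)
                ib_umem_odp_free(umem_odp);

        put_pid(umem_odp->tgid);
        kfree(umem_odp);
}

The put_pid()/kfree() pair stays common to both flavours, as it was before the split.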