Skip to content

Commit 15a9f67

Browse files
committed
RDMA/umem: Separate implicit ODP initialization from explicit ODP
Create separate functions for the implicit ODP initialization, which is different from the explicit ODP initialization.

Tested-by: Jens Axboe <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 1efe8c0 commit 15a9f67

File tree

1 file changed

+46
-45
lines changed

1 file changed

+46
-45
lines changed

drivers/infiniband/core/umem_odp.c

Lines changed: 46 additions & 45 deletions
Original file line number · Diff line number · Diff line change
@@ -48,41 +48,44 @@
4848

4949
#include "uverbs.h"
5050

51-
static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
52-
const struct mmu_interval_notifier_ops *ops)
51+
static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
52+
{
53+
umem_odp->is_implicit_odp = 1;
54+
umem_odp->umem.is_odp = 1;
55+
mutex_init(&umem_odp->umem_mutex);
56+
}
57+
58+
static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
59+
const struct mmu_interval_notifier_ops *ops)
5360
{
5461
struct ib_device *dev = umem_odp->umem.ibdev;
62+
size_t page_size = 1UL << umem_odp->page_shift;
63+
unsigned long start;
64+
unsigned long end;
5565
int ret;
5666

5767
umem_odp->umem.is_odp = 1;
5868
mutex_init(&umem_odp->umem_mutex);
5969

60-
if (!umem_odp->is_implicit_odp) {
61-
size_t page_size = 1UL << umem_odp->page_shift;
62-
unsigned long start;
63-
unsigned long end;
64-
65-
start = ALIGN_DOWN(umem_odp->umem.address, page_size);
66-
if (check_add_overflow(umem_odp->umem.address,
67-
(unsigned long)umem_odp->umem.length,
68-
&end))
69-
return -EOVERFLOW;
70-
end = ALIGN(end, page_size);
71-
if (unlikely(end < page_size))
72-
return -EOVERFLOW;
73-
74-
ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
75-
(end - start) >> PAGE_SHIFT,
76-
1 << umem_odp->page_shift);
77-
if (ret)
78-
return ret;
79-
80-
ret = mmu_interval_notifier_insert(&umem_odp->notifier,
81-
umem_odp->umem.owning_mm,
82-
start, end - start, ops);
83-
if (ret)
84-
goto out_free_map;
85-
}
70+
start = ALIGN_DOWN(umem_odp->umem.address, page_size);
71+
if (check_add_overflow(umem_odp->umem.address,
72+
(unsigned long)umem_odp->umem.length, &end))
73+
return -EOVERFLOW;
74+
end = ALIGN(end, page_size);
75+
if (unlikely(end < page_size))
76+
return -EOVERFLOW;
77+
78+
ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
79+
(end - start) >> PAGE_SHIFT,
80+
1 << umem_odp->page_shift);
81+
if (ret)
82+
return ret;
83+
84+
ret = mmu_interval_notifier_insert(&umem_odp->notifier,
85+
umem_odp->umem.owning_mm, start,
86+
end - start, ops);
87+
if (ret)
88+
goto out_free_map;
8689

8790
return 0;
8891

@@ -106,7 +109,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
106109
{
107110
struct ib_umem *umem;
108111
struct ib_umem_odp *umem_odp;
109-
int ret;
110112

111113
if (access & IB_ACCESS_HUGETLB)
112114
return ERR_PTR(-EINVAL);
@@ -118,16 +120,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
118120
umem->ibdev = device;
119121
umem->writable = ib_access_writable(access);
120122
umem->owning_mm = current->mm;
121-
umem_odp->is_implicit_odp = 1;
122123
umem_odp->page_shift = PAGE_SHIFT;
123124

124125
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
125-
ret = ib_init_umem_odp(umem_odp, NULL);
126-
if (ret) {
127-
put_pid(umem_odp->tgid);
128-
kfree(umem_odp);
129-
return ERR_PTR(ret);
130-
}
126+
ib_init_umem_implicit_odp(umem_odp);
131127
return umem_odp;
132128
}
133129
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -248,7 +244,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
248244
}
249245
EXPORT_SYMBOL(ib_umem_odp_get);
250246

251-
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
247+
static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
252248
{
253249
struct ib_device *dev = umem_odp->umem.ibdev;
254250

@@ -258,14 +254,19 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
258254
* It is the driver's responsibility to ensure, before calling us,
259255
* that the hardware will not attempt to access the MR any more.
260256
*/
261-
if (!umem_odp->is_implicit_odp) {
262-
mutex_lock(&umem_odp->umem_mutex);
263-
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
264-
ib_umem_end(umem_odp));
265-
mutex_unlock(&umem_odp->umem_mutex);
266-
mmu_interval_notifier_remove(&umem_odp->notifier);
267-
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
268-
}
257+
mutex_lock(&umem_odp->umem_mutex);
258+
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
259+
ib_umem_end(umem_odp));
260+
mutex_unlock(&umem_odp->umem_mutex);
261+
mmu_interval_notifier_remove(&umem_odp->notifier);
262+
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
263+
}
264+
265+
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
266+
{
267+
if (!umem_odp->is_implicit_odp)
268+
ib_umem_odp_free(umem_odp);
269+
269270
put_pid(umem_odp->tgid);
270271
kfree(umem_odp);
271272
}

0 commit comments

Comments (0)