Commit 681467e

Merge pull request #1657 from TaranovK/kotaranov/memory_helper
providers/mana: use helpers to (de-)allocate memory
2 parents 4ebcec3 + 3cc7205 commit 681467e
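
At a glance, the patch replaces the provider's direct mmap()/munmap() calls with a pair of helpers, mana_alloc_mem() and mana_dealloc_mem(), which additionally register the mapping with ibv_dontfork_range() and undo that with ibv_dofork_range() before unmapping. A minimal caller-side sketch follows; the names setup_ring/teardown_ring and the buffer they manage are illustrative, not part of the patch:

	#include <stddef.h>

	/* Declarations of the new helpers, as introduced in providers/mana/mana.h. */
	void *mana_alloc_mem(size_t size);             /* mmap + ibv_dontfork_range */
	void mana_dealloc_mem(void *buf, size_t size); /* ibv_dofork_range + munmap */

	/* Hypothetical caller, shown only to illustrate the intended usage pattern. */
	static int setup_ring(size_t ring_size, void **ring_out)
	{
		void *ring = mana_alloc_mem(ring_size);

		if (!ring)
			return -1;	/* mmap or dontfork registration failed */

		*ring_out = ring;
		return 0;
	}

	static void teardown_ring(void *ring, size_t ring_size)
	{
		if (ring)
			mana_dealloc_mem(ring, ring_size);
	}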

File tree: 5 files changed (+46, -30 lines)

providers/mana/cq.c
providers/mana/mana.c
providers/mana/mana.h
providers/mana/qp.c
providers/mana/shadow_queue.h

providers/mana/cq.c

Lines changed: 2 additions & 2 deletions
@@ -100,7 +100,7 @@ struct ibv_cq *mana_create_cq(struct ibv_context *context, int cqe,
 	if (cq->buf_external)
 		ctx->extern_alloc.free(cq->buf, ctx->extern_alloc.data);
 	else
-		munmap(cq->buf, cq_size);
+		mana_dealloc_mem(cq->buf, cq_size);
 free_cq:
 	free(cq);
 	return NULL;
@@ -125,7 +125,7 @@ int mana_destroy_cq(struct ibv_cq *ibcq)
 	if (cq->buf_external)
 		ctx->extern_alloc.free(cq->buf, ctx->extern_alloc.data);
 	else
-		munmap(cq->buf, cq->cqe * COMP_ENTRY_SIZE);
+		mana_dealloc_mem(cq->buf, cq->cqe * COMP_ENTRY_SIZE);
 
 	free(cq);

providers/mana/mana.c

Lines changed: 38 additions & 1 deletion
@@ -35,7 +35,7 @@ struct mana_context *to_mctx(struct ibv_context *ibctx)
 	return container_of(ibctx, struct mana_context, ibv_ctx.context);
 }
 
-void *mana_alloc_mem(uint32_t size)
+void *mana_alloc_mem(size_t size)
 {
 	void *buf;
 
@@ -44,7 +44,44 @@ void *mana_alloc_mem(uint32_t size)
 
 	if (buf == MAP_FAILED)
 		return NULL;
+
+	if (ibv_dontfork_range(buf, size))
+		goto unmap;
+
 	return buf;
+
+unmap:
+	munmap(buf, size);
+	return NULL;
+}
+
+void mana_dealloc_mem(void *buf, size_t size)
+{
+	ibv_dofork_range(buf, size);
+	munmap(buf, size);
+}
+
+int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride)
+{
+	length = roundup_pow_of_two(length);
+	stride = align(stride, 8);
+
+	queue->buffer = mana_alloc_mem(stride * length);
+	if (!queue->buffer)
+		return -1;
+
+	queue->length = length;
+	queue->stride = stride;
+	reset_shadow_queue(queue);
+	return 0;
+}
+
+void destroy_shadow_queue(struct shadow_queue *queue)
+{
+	if (queue->buffer) {
+		mana_dealloc_mem(queue->buffer, queue->stride * queue->length);
+		queue->buffer = NULL;
+	}
 }
 
 int mana_query_device_ex(struct ibv_context *context,
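
The point of wrapping munmap() here is the ibv_dontfork_range()/ibv_dofork_range() pairing: libibverbs' fork protection marks such ranges so they are not duplicated into a child on fork(), keeping the parent's registered buffers intact. As a rough sketch of the idea only (not libibverbs' actual implementation, which also page-aligns the range and is a no-op when fork protection is disabled), the two calls reduce to madvise():

	#define _GNU_SOURCE	/* MADV_DONTFORK / MADV_DOFORK on some libcs */
	#include <stddef.h>
	#include <sys/mman.h>

	/* Simplified illustration; names are hypothetical, not the libibverbs API. */
	static int dontfork_range_sketch(void *base, size_t size)
	{
		return madvise(base, size, MADV_DONTFORK);
	}

	static int dofork_range_sketch(void *base, size_t size)
	{
		return madvise(base, size, MADV_DOFORK);
	}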

providers/mana/mana.h

Lines changed: 2 additions & 1 deletion
@@ -182,7 +182,8 @@ struct mana_parent_domain {
 
 struct mana_context *to_mctx(struct ibv_context *ibctx);
 
-void *mana_alloc_mem(uint32_t size);
+void *mana_alloc_mem(size_t size);
+void mana_dealloc_mem(void *buf, size_t size);
 
 int mana_query_device_ex(struct ibv_context *context,
			 const struct ibv_query_device_ex_input *input,

providers/mana/qp.c

Lines changed: 2 additions & 2 deletions
@@ -352,7 +352,7 @@ static struct ibv_qp *mana_create_qp_rc(struct ibv_pd *ibpd,
 	mana_ib_deinit_rb_shmem(qp);
 destroy_queues:
 	while (i-- > 0)
-		munmap(qp->rc_qp.queues[i].buffer, qp->rc_qp.queues[i].size);
+		mana_dealloc_mem(qp->rc_qp.queues[i].buffer, qp->rc_qp.queues[i].size);
 	destroy_shadow_queue(&qp->shadow_rq);
 destroy_shadow_sq:
 	destroy_shadow_queue(&qp->shadow_sq);
@@ -482,7 +482,7 @@ int mana_destroy_qp(struct ibv_qp *ibqp)
 		destroy_shadow_queue(&qp->shadow_rq);
 		mana_ib_deinit_rb_shmem(qp);
 		for (i = 0; i < USER_RC_QUEUE_TYPE_MAX; ++i)
-			munmap(qp->rc_qp.queues[i].buffer, qp->rc_qp.queues[i].size);
+			mana_dealloc_mem(qp->rc_qp.queues[i].buffer, qp->rc_qp.queues[i].size);
 		break;
 	default:
 		verbs_err(verbs_get_ctx(ibqp->context),

providers/mana/shadow_queue.h

Lines changed: 2 additions & 24 deletions
@@ -60,31 +60,9 @@ static inline void reset_shadow_queue(struct shadow_queue *queue)
 	queue->next_to_signal_idx = 0;
 }
 
-static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride)
-{
-	length = roundup_pow_of_two(length);
-	stride = align(stride, 8);
-
-	void *buffer = mmap(NULL, stride * length, PROT_READ | PROT_WRITE,
-			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
-	if (buffer == MAP_FAILED)
-		return -1;
-
-	queue->length = length;
-	queue->stride = stride;
-	reset_shadow_queue(queue);
-	queue->buffer = buffer;
-	return 0;
-}
+int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride);
 
-static inline void destroy_shadow_queue(struct shadow_queue *queue)
-{
-	if (queue->buffer) {
-		munmap(queue->buffer, queue->stride * queue->length);
-		queue->buffer = NULL;
-	}
-}
+void destroy_shadow_queue(struct shadow_queue *queue);
 
 static inline _atomic_t *producer(struct shadow_queue *queue)
 {
