
Commit c5d68d2

svcrdma: Clean up allocation of svc_rdma_recv_ctxt
The physical device's favored NUMA node ID is available when allocating
a recv_ctxt. Use that value instead of relying on the assumption that
the memory allocation happens to be running on a node close to the
device.

This clean up eliminates the hack of destroying recv_ctxts that were
not created by the receive CQ thread -- recv_ctxts are now always
allocated on a "good" node.

Signed-off-by: Chuck Lever <[email protected]>
1 parent: fe2b401
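For context, the pattern this commit adopts is: ask the RDMA core for the
device's NUMA node, then pass that node to kmalloc_node(). A minimal sketch
of the idea (illustrative only; alloc_near_ibdev() is a hypothetical name,
while ibdev_to_node(), kmalloc_node(), and GFP_KERNEL are the real kernel
APIs the diff below uses):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Allocate kernel memory on the NUMA node closest to @ibdev.
 * Hypothetical helper for illustration; not part of this commit.
 */
static void *alloc_near_ibdev(struct ib_device *ibdev, size_t size)
{
	/* NUMA node of the underlying physical device,
	 * or NUMA_NO_NODE when no affinity is recorded.
	 */
	int node = ibdev_to_node(ibdev);

	/* Place the allocation on that node rather than on
	 * whichever node the calling CPU happens to be on.
	 */
	return kmalloc_node(size, GFP_KERNEL, node);
}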

2 files changed: +7, -12 lines

2 files changed

+7
-12
lines changed

include/linux/sunrpc/svc_rdma.h

Lines changed: 0 additions & 1 deletion
@@ -135,7 +135,6 @@ struct svc_rdma_recv_ctxt {
 	struct ib_sge		rc_recv_sge;
 	void			*rc_recv_buf;
 	struct xdr_stream	rc_stream;
-	bool			rc_temp;
 	u32			rc_byte_len;
 	unsigned int		rc_page_count;
 	u32			rc_inv_rkey;

net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

Lines changed: 7 additions & 11 deletions
@@ -125,14 +125,15 @@ static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
 static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
+	int node = ibdev_to_node(rdma->sc_cm_id->device);
 	struct svc_rdma_recv_ctxt *ctxt;
 	dma_addr_t addr;
 	void *buffer;
 
-	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
+	ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
 	if (!ctxt)
 		goto fail0;
-	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
+	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
 	if (!buffer)
 		goto fail1;
 	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
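The node hint degrades gracefully: kmalloc_node() treats NUMA_NO_NODE as
"no preference", so devices without a recorded affinity still allocate as
before. ibdev_to_node() is defined in include/rdma/ib_verbs.h roughly as
below (paraphrased from memory, not part of this diff):

/* Approximate definition, quoted from memory: the ib_device's parent
 * is the physical (e.g. PCI) device, whose NUMA node the bus code
 * records at probe time.
 */
static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent = ibdev->dev.parent;

	if (!parent)
		return NUMA_NO_NODE;
	return dev_to_node(parent);
}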
@@ -155,7 +156,6 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
 	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
 	ctxt->rc_recv_buf = buffer;
-	ctxt->rc_temp = false;
 	return ctxt;
 
 fail2:
@@ -232,10 +232,7 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 	pcl_free(&ctxt->rc_write_pcl);
 	pcl_free(&ctxt->rc_reply_pcl);
 
-	if (!ctxt->rc_temp)
-		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
-	else
-		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
+	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
 }
 
 /**
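With rc_temp gone, every recv_ctxt released here is recycled through the
lock-free sc_recv_ctxts llist instead of sometimes being destroyed. The
consumer side lives in svc_rdma_recv_ctxt_get() in the same file; a
simplified sketch, paraphrased from memory and not part of this diff:

/* Pop a cached recv_ctxt from the lock-free list, if any.
 * Simplified: the real function falls back to allocating a
 * fresh ctxt when the cache is empty.
 */
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		return NULL;
	return llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
}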
@@ -258,7 +255,7 @@ void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 }
 
 static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
-				   unsigned int wanted, bool temp)
+				   unsigned int wanted)
 {
 	const struct ib_recv_wr *bad_wr = NULL;
 	struct svc_rdma_recv_ctxt *ctxt;
@@ -275,7 +272,6 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
 			break;
 
 		trace_svcrdma_post_recv(ctxt);
-		ctxt->rc_temp = temp;
 		ctxt->rc_recv_wr.next = recv_chain;
 		recv_chain = &ctxt->rc_recv_wr;
 		rdma->sc_pending_recvs++;
@@ -309,7 +305,7 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
-	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
 }
 
 /**
@@ -343,7 +339,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	 * client reconnects.
 	 */
 	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
-		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
+		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
 			goto dropped;
 
 	/* All wc fields are now known to be valid */
