 #define NVMET_RDMA_BACKLOG	128
 
+#define NVMET_RDMA_DISCRETE_RSP_TAG	-1
+
 struct nvmet_rdma_srq;
 
 struct nvmet_rdma_cmd {
@@ -75,7 +77,7 @@ struct nvmet_rdma_rsp {
 	u32			invalidate_rkey;
 
 	struct list_head	wait_list;
-	struct list_head	free_list;
+	int			tag;
 };
 
 enum nvmet_rdma_queue_state {
@@ -98,8 +100,7 @@ struct nvmet_rdma_queue {
 	struct nvmet_sq		nvme_sq;
 
 	struct nvmet_rdma_rsp	*rsps;
-	struct list_head	free_rsps;
-	spinlock_t		rsps_lock;
+	struct sbitmap		rsp_tags;
 	struct nvmet_rdma_cmd	*cmds;
 
 	struct work_struct	release_work;
@@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
 				struct nvmet_rdma_rsp *r);
 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
-				struct nvmet_rdma_rsp *r);
+				struct nvmet_rdma_rsp *r,
+				int tag);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -210,29 +212,25 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 static inline struct nvmet_rdma_rsp *
 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 {
-	struct nvmet_rdma_rsp *rsp;
-	unsigned long flags;
+	struct nvmet_rdma_rsp *rsp = NULL;
+	int tag;
 
-	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry_or_null(&queue->free_rsps,
-				struct nvmet_rdma_rsp, free_list);
-	if (likely(rsp))
-		list_del(&rsp->free_list);
-	spin_unlock_irqrestore(&queue->rsps_lock, flags);
+	tag = sbitmap_get(&queue->rsp_tags);
+	if (tag >= 0)
+		rsp = &queue->rsps[tag];
 
 	if (unlikely(!rsp)) {
 		int ret;
 
 		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
-		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp,
+					   NVMET_RDMA_DISCRETE_RSP_TAG);
 		if (unlikely(ret)) {
 			kfree(rsp);
 			return NULL;
 		}
-
-		rsp->allocated = true;
 	}
 
 	return rsp;
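
Note (annotation, not part of the patch): the hunk above swaps the spinlock-protected free list for a lockless tag lookup. A minimal sketch of the same pattern, using a hypothetical demo_pool wrapper around a pre-allocated array and a struct sbitmap of matching depth:

#include <linux/sbitmap.h>

struct demo_pool {			/* hypothetical, for illustration only */
	struct sbitmap	tags;		/* one bit per pre-allocated item */
	void		**items;	/* items[i] is owned by tag i */
};

static void *demo_pool_get(struct demo_pool *p)
{
	int tag = sbitmap_get(&p->tags);	/* free tag, or -1 if none left */

	return tag >= 0 ? p->items[tag] : NULL;
}

static void demo_pool_put(struct demo_pool *p, int tag)
{
	sbitmap_clear_bit(&p->tags, tag);	/* tag becomes reusable */
}

sbitmap_get() returns a free bit index or -1 when the pool is exhausted, which is exactly the case where nvmet_rdma_get_rsp() falls back to a discrete kzalloc'd rsp tagged NVMET_RDMA_DISCRETE_RSP_TAG.
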
@@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 static inline void
 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
-	unsigned long flags;
-
-	if (unlikely(rsp->allocated)) {
+	if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) {
 		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
 
-	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
-	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
-	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+	sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag);
 }
 
 static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
@@ -404,7 +398,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
 }
 
 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
-			struct nvmet_rdma_rsp *r)
+			struct nvmet_rdma_rsp *r, int tag)
 {
 	/* NVMe CQE / RDMA SEND */
 	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
@@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 	r->read_cqe.done = nvmet_rdma_read_data_done;
 	/* Data Out / RDMA WRITE */
 	r->write_cqe.done = nvmet_rdma_write_data_done;
+	r->tag = tag;
 
 	return 0;
 
@@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 {
 	struct nvmet_rdma_device *ndev = queue->dev;
 	int nr_rsps = queue->recv_queue_size * 2;
-	int ret = -EINVAL, i;
+	int ret = -ENOMEM, i;
+
+	if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL,
+			      NUMA_NO_NODE, false, true))
+		goto out;
 
 	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
 			GFP_KERNEL);
 	if (!queue->rsps)
-		goto out;
+		goto out_free_sbitmap;
 
 	for (i = 0; i < nr_rsps; i++) {
 		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
 
-		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+		ret = nvmet_rdma_alloc_rsp(ndev, rsp, i);
 		if (ret)
 			goto out_free;
-
-		list_add_tail(&rsp->free_list, &queue->free_rsps);
 	}
 
 	return 0;
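
Note (annotation, not from the patch): sbitmap_init_node() takes (sb, depth, shift, flags, node, round_robin, alloc_hint); here depth is nr_rsps, shift = -1 lets the library pick the bits-per-word layout, round-robin allocation is off, and the per-CPU allocation hint is enabled. A hedged sketch of the same init/teardown pairing for the hypothetical demo_pool above:

static int demo_pool_init(struct demo_pool *p, unsigned int depth)
{
	/* same argument choices as the hunk above */
	if (sbitmap_init_node(&p->tags, depth, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;

	p->items = kcalloc(depth, sizeof(*p->items), GFP_KERNEL);
	if (!p->items) {
		sbitmap_free(&p->tags);	/* unwind, as out_free_sbitmap does */
		return -ENOMEM;
	}
	return 0;
}

Changing the default error from -EINVAL to -ENOMEM matches the failures that remain reachable through the early gotos: the sbitmap, the rsps array, or an individual rsp allocation.
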
@@ -477,6 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 	while (--i >= 0)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
+out_free_sbitmap:
+	sbitmap_free(&queue->rsp_tags);
 out:
 	return ret;
 }
@@ -489,6 +488,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 	for (i = 0; i < nr_rsps; i++)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
+	sbitmap_free(&queue->rsp_tags);
 }
 
 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
@@ -1447,8 +1447,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 	INIT_LIST_HEAD(&queue->rsp_wait_list);
 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
 	spin_lock_init(&queue->rsp_wr_wait_lock);
-	INIT_LIST_HEAD(&queue->free_rsps);
-	spin_lock_init(&queue->rsps_lock);
 	INIT_LIST_HEAD(&queue->queue_list);
 
 	queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
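
Note (annotation, not from the patch): with the free list and its irq-saving spinlock gone, the per-queue sbitmap covers the whole rsp lifecycle, and the tag doubles as the index into queue->rsps so lookup stays O(1). A rough, hypothetical sketch of the flow using the functions touched above (error handling trimmed):

static void demo_rsp_lifecycle(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;

	/* setup: nvmet_rdma_alloc_rsps() sizes the sbitmap and tags
	 * every pre-allocated rsp with its array index */
	rsp = nvmet_rdma_get_rsp(queue);	/* sbitmap_get(), or kzalloc fallback */
	if (!rsp)
		return;

	/* ... process the command ... */

	nvmet_rdma_put_rsp(rsp);		/* sbitmap_clear_bit(), or kfree */

	/* teardown: nvmet_rdma_free_rsps() frees the rsps and the sbitmap */
}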