@@ -123,53 +123,91 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
 	return 0;
 }
 
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
+{
+	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+	struct nvme_private *priv = (struct nvme_private *)sp->priv;
+	struct nvmefc_fcp_req *fd;
+	struct srb_iocb *nvme;
+	unsigned long flags;
+
+	if (!priv)
+		goto out;
+
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	priv->sp = NULL;
+	sp->priv = NULL;
+	if (priv->comp_status == QLA_SUCCESS) {
+		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+	} else {
+		fd->rcv_rsplen = 0;
+		fd->transferred_length = 0;
+	}
+	fd->status = 0;
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+	fd->done(fd);
+out:
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+	struct nvme_private *priv = (struct nvme_private *)sp->priv;
+	struct nvmefc_ls_req *fd;
+	unsigned long flags;
+
+	if (!priv)
+		goto out;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	priv->sp = NULL;
+	sp->priv = NULL;
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+	fd = priv->fd;
+	fd->done(fd, priv->comp_status);
+out:
+	qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+	struct nvme_private *priv =
+		container_of(work, struct nvme_private, ls_work);
+
+	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
 static void qla_nvme_sp_ls_done(void *ptr, int res)
 {
 	srb_t *sp = ptr;
-	struct srb_iocb *nvme;
-	struct nvmefc_ls_req *fd;
 	struct nvme_private *priv;
 
-	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
 		return;
 
-	atomic_dec(&sp->ref_count);
-
 	if (res)
 		res = -EINVAL;
 
-	nvme = &sp->u.iocb_cmd;
-	fd = nvme->u.nvme.desc;
-	priv = fd->private;
+	priv = (struct nvme_private *)sp->priv;
 
 	priv->comp_status = res;
+	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 	schedule_work(&priv->ls_work);
-	/* work schedule doesn't need the sp */
-	qla2x00_rel_sp(sp);
 }
 
+/* It is assumed that the QPair lock is held. */
 static void qla_nvme_sp_done(void *ptr, int res)
 {
 	srb_t *sp = ptr;
-	struct srb_iocb *nvme;
-	struct nvmefc_fcp_req *fd;
+	struct nvme_private *priv = (struct nvme_private *)sp->priv;
 
-	nvme = &sp->u.iocb_cmd;
-	fd = nvme->u.nvme.desc;
-
-	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
-		return;
-
-	atomic_dec(&sp->ref_count);
-
-	if (res == QLA_SUCCESS) {
-		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
-	} else {
-		fd->rcv_rsplen = 0;
-		fd->transferred_length = 0;
-	}
-	fd->status = 0;
-	fd->done(fd);
-	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+	priv->comp_status = res;
+	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
 
 	return;
 }
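The hunk above moves command lifetime management from a hand-rolled atomic_t ref_count to the kernel kref API: the completion handlers now only record the status and drop their reference, and all teardown happens in the kref release callbacks. Below is a minimal, self-contained sketch of that pattern; demo_cmd, demo_priv, demo_release and demo_complete are illustrative names, not driver symbols.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative stand-ins for srb_t and struct nvme_private. */
struct demo_priv;

struct demo_cmd {
	struct kref cmd_kref;		/* lifetime of the command */
	struct demo_priv *priv;		/* back-pointer, cleared on release */
};

struct demo_priv {
	spinlock_t cmd_lock;		/* serializes completion vs. abort */
	struct demo_cmd *cmd;
	int comp_status;
	void (*done)(struct demo_priv *priv);	/* upper-layer completion */
};

/* Runs only when the last reference is dropped. */
static void demo_release(struct kref *kref)
{
	struct demo_cmd *cmd = container_of(kref, struct demo_cmd, cmd_kref);
	struct demo_priv *priv = cmd->priv;
	unsigned long flags;

	if (priv) {
		/*
		 * Break both back-pointers under the lock so a racing
		 * abort sees a consistent "command is gone" state.
		 */
		spin_lock_irqsave(&priv->cmd_lock, flags);
		priv->cmd = NULL;
		cmd->priv = NULL;
		spin_unlock_irqrestore(&priv->cmd_lock, flags);

		priv->done(priv);	/* complete the upper-layer request */
	}
	kfree(cmd);			/* stands in for qla2x00_rel_sp() */
}

/* Completion path: record the status, then drop the submission reference. */
static void demo_complete(struct demo_cmd *cmd, int res)
{
	cmd->priv->comp_status = res;
	kref_put(&cmd->cmd_kref, demo_release);
}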
@@ -188,44 +226,50 @@ static void qla_nvme_abort_work(struct work_struct *work)
 		__func__, sp, sp->handle, fcport, fcport->deleted);
 
 	if (!ha->flags.fw_started && (fcport && fcport->deleted))
-		return;
+		goto out;
 
 	if (ha->flags.host_shutting_down) {
 		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
 		    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
 		    __func__, sp, sp->type, atomic_read(&sp->ref_count));
 		sp->done(sp, 0);
-		return;
+		goto out;
 	}
 
-	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
-		return;
-
 	rval = ha->isp_ops->abort_command(sp);
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
 	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
 	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
 	    sp, sp->handle, fcport, rval);
+
+out:
+	/* kref_get was done before the work was scheduled. */
+	kref_put(&sp->cmd_kref, sp->put_fn);
 }
 
 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
 	struct nvme_private *priv = fd->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	if (!priv->sp) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+
+	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
 	schedule_work(&priv->abort_work);
 }
 
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
-	struct nvme_private *priv =
-	    container_of(work, struct nvme_private, ls_work);
-	struct nvmefc_ls_req *fd = priv->fd;
-
-	fd->done(fd, priv->comp_status);
-}
 
 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
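The two abort entry points (qla_nvme_ls_abort above and qla_nvme_fcp_abort in a later hunk) now schedule abort work only after taking an extra reference under priv->cmd_lock; if the command has already completed, kref_get_unless_zero() fails and the abort is silently dropped. Here is a minimal sketch of that guard, reusing the illustrative demo_* types from the previous sketch; demo_try_get_for_abort is hypothetical, not a driver function.

#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Take a reference on the in-flight command, but only if it still exists
 * and has not already dropped its last reference.  The caller schedules
 * abort work only when this returns true, and the work handler must end
 * with the matching kref_put() on every exit path; this is why the early
 * returns in qla_nvme_abort_work became goto out in the hunk above.
 */
static bool demo_try_get_for_abort(struct demo_priv *priv)
{
	unsigned long flags;
	bool got = false;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (priv->cmd && kref_get_unless_zero(&priv->cmd->cmd_kref))
		got = true;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	return got;
}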
@@ -257,11 +301,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	sp->type = SRB_NVME_LS;
 	sp->name = "nvme_ls";
 	sp->done = qla_nvme_sp_ls_done;
-	atomic_set(&sp->ref_count, 1);
-	nvme = &sp->u.iocb_cmd;
+	sp->put_fn = qla_nvme_release_ls_cmd_kref;
+	sp->priv = (void *)priv;
 	priv->sp = sp;
+	kref_init(&sp->cmd_kref);
+	spin_lock_init(&priv->cmd_lock);
+	nvme = &sp->u.iocb_cmd;
 	priv->fd = fd;
-	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 	nvme->u.nvme.desc = fd;
 	nvme->u.nvme.dir = 0;
 	nvme->u.nvme.dl = 0;
@@ -278,9 +324,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x700e,
 		    "qla2x00_start_sp failed = %d\n", rval);
-		atomic_dec(&sp->ref_count);
 		wake_up(&sp->nvme_ls_waitq);
-		sp->free(sp);
+		sp->priv = NULL;
+		priv->sp = NULL;
+		qla2x00_rel_sp(sp);
 		return rval;
 	}
 
@@ -292,6 +339,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
     struct nvmefc_fcp_req *fd)
 {
 	struct nvme_private *priv = fd->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	if (!priv->sp) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
 	schedule_work(&priv->abort_work);
@@ -515,12 +574,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	if (!sp)
 		return -EBUSY;
 
-	atomic_set(&sp->ref_count, 1);
 	init_waitqueue_head(&sp->nvme_ls_waitq);
+	kref_init(&sp->cmd_kref);
+	spin_lock_init(&priv->cmd_lock);
+	sp->priv = (void *)priv;
 	priv->sp = sp;
 	sp->type = SRB_NVME_CMD;
 	sp->name = "nvme_cmd";
 	sp->done = qla_nvme_sp_done;
+	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
 	sp->qpair = qpair;
 	sp->vha = vha;
 	nvme = &sp->u.iocb_cmd;
@@ -530,9 +592,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x212d,
 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
-		atomic_dec(&sp->ref_count);
 		wake_up(&sp->nvme_ls_waitq);
-		sp->free(sp);
+		sp->priv = NULL;
+		priv->sp = NULL;
+		qla2xxx_rel_qpair_sp(sp->qpair, sp);
 	}
 
 	return rval;
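On the submission side (the qla_nvme_ls_req and qla_nvme_post_cmd hunks above), kref_init() starts each command at a reference count of one and the srb and its nvme_private are cross-linked before the command is handed to the hardware; if qla2x00_start_sp() or qla2x00_start_nvme_mq() fails, the links are undone and the srb is released directly, since no other context can hold a reference yet. Below is a minimal sketch of that setup and unwind, again using the illustrative demo_* types from the first sketch; demo_start_hw() and demo_free() are hypothetical stand-ins, not driver functions.

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for the hardware-start and srb-release helpers. */
static int demo_start_hw(struct demo_cmd *cmd)
{
	return 0;		/* pretend the command was queued */
}

static void demo_free(struct demo_cmd *cmd)
{
	kfree(cmd);		/* stands in for the rel_sp/rel_qpair_sp calls */
}

static int demo_submit(struct demo_priv *priv, struct demo_cmd *cmd)
{
	int rval;

	kref_init(&cmd->cmd_kref);	/* reference count starts at 1 */
	spin_lock_init(&priv->cmd_lock);
	cmd->priv = priv;
	priv->cmd = cmd;

	rval = demo_start_hw(cmd);
	if (rval) {
		/*
		 * Nothing else holds a reference yet, so unwind the
		 * cross-links and free the command directly rather than
		 * going through kref_put().
		 */
		cmd->priv = NULL;
		priv->cmd = NULL;
		demo_free(cmd);
	}
	return rval;
}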