Skip to content

Commit ab24864

Browse files
Bao D. Nguyen authored and Martin K. Petersen committed
scsi: ufs: core: Add error handling for MCQ mode
Add support for error handling for MCQ mode.

Suggested-by: Can Guo <[email protected]>
Co-developed-by: Stanley Chu <[email protected]>
Signed-off-by: Stanley Chu <[email protected]>
Signed-off-by: Bao D. Nguyen <[email protected]>
Link: https://lore.kernel.org/r/f0d923ee1f009f171a55c258d044e814ec0917ab.1685396241.git.quic_nguyenb@quicinc.com
Reviewed-by: Stanley Chu <[email protected]>
Tested-by: Stanley Chu <[email protected]>
Reviewed-by: Can Guo <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
1 parent 57d6ef4 commit ab24864

File tree

3 files changed

+139
-18
lines changed

3 files changed

+139
-18
lines changed

drivers/ufs/core/ufs-mcq.c

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -276,12 +276,34 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
276276
}
277277

278278
/*
 * ufshcd_mcq_process_cqe - process the completion-queue entry at the
 * current CQ head slot of @hwq and complete the matching command.
 *
 * Only entries with a non-zero command_desc_base_addr are considered
 * valid; after completion the field is zeroed so a later full-queue
 * sweep (see ufshcd_mcq_compl_all_cqes_lock) skips already-handled
 * slots. Caller is expected to hold hwq->cq_lock — TODO confirm against
 * all callers; the *_lock wrappers visible here do take it.
 */
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);

	/* A zero descriptor address marks an empty/invalid CQ entry. */
	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processed the cqe, mark it empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}
290+
291+
/*
 * ufshcd_mcq_compl_all_cqes_lock - walk every slot of @hwq's completion
 * queue under cq_lock and complete any valid (non-empty) entries found.
 *
 * Used on the error-handling path where the controller may have been
 * stopped: it scans all max_entries slots regardless of the hardware
 * tail pointer, then re-reads the tail and resets the head to match so
 * the software CQ state is consistent afterwards.
 *
 * @hba: per adapter instance
 * @hwq: hardware queue whose CQ is drained
 */
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	/* Visit every slot; ufshcd_mcq_process_cqe() skips empty ones. */
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	/* Resync software head with the hardware tail after the sweep. */
	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
286308

287309
static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,

drivers/ufs/core/ufshcd-priv.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,8 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
7575
struct request *req);
7676
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
7777
struct ufs_hw_queue *hwq);
78-
78+
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
79+
struct ufs_hw_queue *hwq);
7980
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
8081
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
8182
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);

drivers/ufs/core/ufshcd.c

Lines changed: 113 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3141,6 +3141,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
31413141
err = -ETIMEDOUT;
31423142
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
31433143
__func__, lrbp->task_tag);
3144+
3145+
/* MCQ mode */
3146+
if (is_mcq_enabled(hba)) {
3147+
err = ufshcd_clear_cmd(hba, lrbp->task_tag);
3148+
hba->dev_cmd.complete = NULL;
3149+
return err;
3150+
}
3151+
3152+
/* SDB mode */
31443153
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
31453154
/* successfully cleared the command, retry if needed */
31463155
err = -EAGAIN;
@@ -5564,6 +5573,57 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
55645573
return completed_reqs != 0;
55655574
}
55665575

5576+
/**
 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
 * invoked from the error handler context or ufshcd_host_reset_and_restore()
 * to complete the pending transfers and free the resources associated with
 * the scsi command.
 *
 * @hba: per adapter instance
 * @force_compl: This flag is set to true when invoked
 * from ufshcd_host_reset_and_restore() in which case it requires special
 * handling because the host controller has been reset by ufshcd_hba_stop().
 */
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
					      bool force_compl)
{
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	u32 hwq_num, utag;
	int tag;

	/* Scan every request slot for commands still marked in flight. */
	for (tag = 0; tag < hba->nutrs; tag++) {
		lrbp = &hba->lrb[tag];
		cmd = lrbp->cmd;
		/*
		 * Skip slots with no in-flight command or whose command the
		 * SCSI layer has already completed. NOTE(review): relies on
		 * ufshcd_cmd_inflight() rejecting a NULL cmd so the test_bit()
		 * below never sees cmd == NULL — confirm in its definition.
		 */
		if (!ufshcd_cmd_inflight(cmd) ||
		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
			continue;

		/* Map the request back to its MCQ I/O hardware queue. */
		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq_num = blk_mq_unique_tag_to_hwq(utag);
		hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];

		if (force_compl) {
			/* Controller was reset: drain whatever CQEs exist. */
			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
			/*
			 * For those cmds of which the cqes are not present
			 * in the cq, complete them explicitly.
			 */
			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
				spin_lock_irqsave(&hwq->cq_lock, flags);
				/* Ask the SCSI midlayer to retry the command. */
				set_host_byte(cmd, DID_REQUEUE);
				ufshcd_release_scsi_cmd(hba, lrbp);
				scsi_done(cmd);
				spin_unlock_irqrestore(&hwq->cq_lock, flags);
			}
		} else {
			/* Normal error-handler path: poll for real CQEs. */
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}
	}
}
5626+
55675627
/**
55685628
* ufshcd_transfer_req_compl - handle SCSI and query command completion
55695629
* @hba: per adapter instance
@@ -6128,9 +6188,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
61286188
}
61296189

61306190
/*
 * Complete requests that have door-bell cleared.
 *
 * @hba: per adapter instance
 * @force_compl: in MCQ mode, force completion of commands whose CQEs may
 *               never arrive (controller already reset); ignored in the
 *               legacy single-doorbell path.
 *
 * Dispatches to the MCQ-specific or legacy completion routine, then lets
 * the task-management completion handler run in both modes.
 */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
	if (is_mcq_enabled(hba))
		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
	else
		ufshcd_transfer_req_compl(hba);

	ufshcd_tmc_handler(hba);
}
61366200

@@ -6371,18 +6435,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
63716435
bool needs_reset = false;
63726436
int tag, ret;
63736437

6374-
/* Clear pending transfer requests */
6375-
for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6376-
ret = ufshcd_try_to_abort_task(hba, tag);
6377-
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6378-
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6379-
ret ? "failed" : "succeeded");
6380-
if (ret) {
6381-
needs_reset = true;
6382-
goto out;
6438+
if (is_mcq_enabled(hba)) {
6439+
struct ufshcd_lrb *lrbp;
6440+
int tag;
6441+
6442+
for (tag = 0; tag < hba->nutrs; tag++) {
6443+
lrbp = &hba->lrb[tag];
6444+
if (!ufshcd_cmd_inflight(lrbp->cmd))
6445+
continue;
6446+
ret = ufshcd_try_to_abort_task(hba, tag);
6447+
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6448+
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6449+
ret ? "failed" : "succeeded");
6450+
if (ret) {
6451+
needs_reset = true;
6452+
goto out;
6453+
}
6454+
}
6455+
} else {
6456+
/* Clear pending transfer requests */
6457+
for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6458+
ret = ufshcd_try_to_abort_task(hba, tag);
6459+
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6460+
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6461+
ret ? "failed" : "succeeded");
6462+
if (ret) {
6463+
needs_reset = true;
6464+
goto out;
6465+
}
63836466
}
63846467
}
6385-
63866468
/* Clear pending task management requests */
63876469
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
63886470
if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -6393,7 +6475,7 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
63936475

63946476
out:
63956477
/* Complete the requests that are cleared by s/w */
6396-
ufshcd_complete_requests(hba);
6478+
ufshcd_complete_requests(hba, false);
63976479

63986480
return needs_reset;
63996481
}
@@ -6433,7 +6515,7 @@ static void ufshcd_err_handler(struct work_struct *work)
64336515
spin_unlock_irqrestore(hba->host->host_lock, flags);
64346516
ufshcd_err_handling_prepare(hba);
64356517
/* Complete requests that have door-bell cleared by h/w */
6436-
ufshcd_complete_requests(hba);
6518+
ufshcd_complete_requests(hba, false);
64376519
spin_lock_irqsave(hba->host->host_lock, flags);
64386520
again:
64396521
needs_restore = false;
@@ -7314,6 +7396,8 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
73147396
unsigned long flags, pending_reqs = 0, not_cleared = 0;
73157397
struct Scsi_Host *host;
73167398
struct ufs_hba *hba;
7399+
struct ufs_hw_queue *hwq;
7400+
struct ufshcd_lrb *lrbp;
73177401
u32 pos, not_cleared_mask = 0;
73187402
int err;
73197403
u8 resp = 0xF, lun;
@@ -7329,6 +7413,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
73297413
goto out;
73307414
}
73317415

7416+
if (is_mcq_enabled(hba)) {
7417+
for (pos = 0; pos < hba->nutrs; pos++) {
7418+
lrbp = &hba->lrb[pos];
7419+
if (ufshcd_cmd_inflight(lrbp->cmd) &&
7420+
lrbp->lun == lun) {
7421+
ufshcd_clear_cmd(hba, pos);
7422+
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7423+
ufshcd_mcq_poll_cqe_lock(hba, hwq);
7424+
}
7425+
}
7426+
err = 0;
7427+
goto out;
7428+
}
7429+
73327430
/* clear the commands that were pending for corresponding LUN */
73337431
spin_lock_irqsave(&hba->outstanding_lock, flags);
73347432
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
@@ -7612,7 +7710,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
76127710
ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
76137711
ufshcd_hba_stop(hba);
76147712
hba->silence_err_logs = true;
7615-
ufshcd_complete_requests(hba);
7713+
ufshcd_complete_requests(hba, true);
76167714
hba->silence_err_logs = false;
76177715

76187716
/* scale up clocks to max frequency before full reinitialization */

0 commit comments

Comments
 (0)