@@ -12,6 +12,10 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
 
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
+/* Max MCQ register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
 	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -269,16 +276,38 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
 }
 
 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
-				   struct ufs_hw_queue *hwq)
+				    struct ufs_hw_queue *hwq)
 {
 	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
 	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
 
-	ufshcd_compl_one_cqe(hba, tag, cqe);
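+	/* Skip entries whose UCD base address was cleared: already completed */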
+	if (cqe->command_desc_base_addr) {
+		ufshcd_compl_one_cqe(hba, tag, cqe);
+		/* After processing the CQE, mark it as an empty (invalid) entry */
+		cqe->command_desc_base_addr = 0;
+	}
 }
 
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq)
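+/*
+ * Walk every slot in the CQ and complete any valid entry, then resync
+ * the head pointer with the current tail. Intended for error handling
+ * paths (e.g. a host reset) where pending completions must be flushed.
+ */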
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+				    struct ufs_hw_queue *hwq)
+{
+	unsigned long flags;
+	u32 entries = hwq->max_entries;
+
+	spin_lock_irqsave(&hwq->cq_lock, flags);
+	while (entries > 0) {
+		ufshcd_mcq_process_cqe(hba, hwq);
+		ufshcd_mcq_inc_cq_head_slot(hwq);
+		entries--;
+	}
+
+	ufshcd_mcq_update_cq_tail_slot(hwq);
+	hwq->cq_head_slot = hwq->cq_tail_slot;
+	spin_unlock_irqrestore(&hwq->cq_lock, flags);
+}
+
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+						struct ufs_hw_queue *hwq)
 {
 	unsigned long completed_reqs = 0;
 
@@ -294,7 +323,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq)
@@ -307,6 +335,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
@@ -419,6 +448,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 		hwq->max_entries = hba->nutrs;
 		spin_lock_init(&hwq->sq_lock);
 		spin_lock_init(&hwq->cq_lock);
+		mutex_init(&hwq->sq_mutex);
 	}
 
 	/* The very first HW queue serves device commands */
@@ -429,3 +459,222 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 	host->host_tagset = 1;
 	return 0;
 }
+
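+/* Stop SQ fetching: write SQ_STOP to SQRTC, then poll SQRTS for SQ_STS */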
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
+	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
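+/* Resume SQ fetching: write SQ_START to SQRTC and poll until SQ_STS clears */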
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
+	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
+ *
+ * Returns 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct ufs_hw_queue *hwq;
+	void __iomem *reg, *opr_sqd_base;
+	u32 nexus, id, val;
+	int err;
+
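+	/* The reserved tag denotes a device command with its own dedicated queue */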
+	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+		if (!cmd)
+			return -EINVAL;
+		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	} else {
+		hwq = hba->dev_cmd_queue;
+	}
+
+	id = hwq->id;
+
+	mutex_lock(&hwq->sq_mutex);
+
+	/* Stop the SQ fetching before working on it */
+	err = ufshcd_mcq_sq_stop(hba, hwq);
+	if (err)
+		goto unlock;
+
+	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
+	nexus = lrbp->lun << 8 | task_tag;
+	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+	writel(nexus, opr_sqd_base + REG_SQCTI);
+
+	/* SQRTCy.ICU = 1 */
+	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+	reg = opr_sqd_base + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+			__func__, id, task_tag,
+			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
+	if (ufshcd_mcq_sq_start(hba, hwq))
+		err = -ETIMEDOUT;
+
+unlock:
+	mutex_unlock(&hwq->sq_mutex);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the SQE's Command Type to 0xF; the host controller will not
+ * fetch any SQE with Command Type = 0xF.
+ *
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+	u32 dword_0;
+
+	dword_0 = le32_to_cpu(utrd->header.dword_0);
+	dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+	dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+	utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue.
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the SQE so the host controller will skip fetching it.
+ *
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
+ *
+ * Returns true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+				  struct ufs_hw_queue *hwq, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct utp_transfer_req_desc *utrd;
+	u32 mask = hwq->max_entries - 1;
+	__le64 cmd_desc_base_addr;
+	bool ret = false;
+	u64 addr, match;
+	u32 sq_head_slot;
+
+	mutex_lock(&hwq->sq_mutex);
+
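+	/* Stop SQ fetching so the head/tail snapshot cannot race the controller */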
+	ufshcd_mcq_sq_stop(hba, hwq);
+	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+	if (sq_head_slot == hwq->sq_tail_slot)
+		goto out;
+
+	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
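+	/*
+	 * Scan from head to tail; a slot whose UCD base address matches the
+	 * command's descriptor has not been fetched by the controller yet.
+	 */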
+	while (sq_head_slot != hwq->sq_tail_slot) {
+		utrd = hwq->sqe_base_addr +
+		       sq_head_slot * sizeof(struct utp_transfer_req_desc);
+		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+		if (addr == match) {
+			ufshcd_mcq_nullify_sqe(utrd);
+			ret = true;
+			goto out;
+		}
+		sq_head_slot = (sq_head_slot + 1) & mask;
+	}
+
+out:
+	ufshcd_mcq_sq_start(hba, hwq);
+	mutex_unlock(&hwq->sq_mutex);
+	return ret;
+}
+
+/**
+ * ufshcd_mcq_abort - Abort the command in MCQ.
+ * @cmd: The command to be aborted.
+ *
+ * Returns SUCCESS or FAILED.
+ */
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct ufs_hba *hba = shost_priv(host);
+	int tag = scsi_cmd_to_rq(cmd)->tag;
+	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+	struct ufs_hw_queue *hwq;
+	int err = FAILED;
+
+	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+		dev_err(hba->dev,
+			"%s: skip abort. cmd at tag %d already completed.\n",
+			__func__, tag);
+		goto out;
+	}
+
+	/* Skip task abort in case previous aborts failed and report failure */
+	if (lrbp->req_abort_skip) {
+		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+			__func__, tag);
+		goto out;
+	}
+
+	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+		/*
+		 * Failure: the command should not have stayed stuck in the
+		 * SQ long enough for the abort timeout to be triggered.
+		 */
+		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+			__func__, hwq->id, tag);
+		goto out;
+	}
+
+	/*
+	 * The command is not in the submission queue, and it is not
+	 * in the completion queue either. Query the device to see if
+	 * the command is being processed in the device.
+	 */
+	if (ufshcd_try_to_abort_task(hba, tag)) {
+		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+		lrbp->req_abort_skip = true;
+		goto out;
+	}
+
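+	/* Neither the SQ nor the device owns the command any more; release it */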
+	err = SUCCESS;
+	if (ufshcd_cmd_inflight(lrbp->cmd))
+		ufshcd_release_scsi_cmd(hba, lrbp);
+
+out:
+	return err;
+}