@@ -12,6 +12,10 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
 
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
+/* Max mcq register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
 	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -419,6 +426,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 		hwq->max_entries = hba->nutrs;
 		spin_lock_init(&hwq->sq_lock);
 		spin_lock_init(&hwq->cq_lock);
+		mutex_init(&hwq->sq_mutex);
 	}
 
 	/* The very first HW queue serves device commands */
@@ -429,3 +437,162 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 	host->host_tagset = 1;
 	return 0;
 }
+
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
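+	/* Stop SQ fetching, then poll SQRTS until the stop takes effect */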
+	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
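+	/* Resume SQ fetching, then poll SQRTS until the stop status clears */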
+	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
+ *
+ * Returns 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct ufs_hw_queue *hwq;
+	void __iomem *reg, *opr_sqd_base;
+	u32 nexus, id, val;
+	int err;
+
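+	/* The reserved tag maps to device commands on the dev_cmd_queue */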
+	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+		if (!cmd)
+			return -EINVAL;
+		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	} else {
+		hwq = hba->dev_cmd_queue;
+	}
+
+	id = hwq->id;
+
+	mutex_lock(&hwq->sq_mutex);
+
+	/* stop the SQ fetching before working on it */
+	err = ufshcd_mcq_sq_stop(hba, hwq);
+	if (err)
+		goto unlock;
+
+	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
+	nexus = lrbp->lun << 8 | task_tag;
+	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+	writel(nexus, opr_sqd_base + REG_SQCTI);
+
+	/* SQRTCy.ICU = 1 */
+	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+	reg = opr_sqd_base + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+			__func__, id, task_tag,
+			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
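+	/* Resume SQ fetching regardless of the cleanup result */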
+	if (ufshcd_mcq_sq_start(hba, hwq))
+		err = -ETIMEDOUT;
+
+unlock:
+	mutex_unlock(&hwq->sq_mutex);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the sqe's Command Type to 0xF. The host controller will not
+ * fetch any sqe with Command Type = 0xF.
+ *
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+	u32 dword_0;
+
+	dword_0 = le32_to_cpu(utrd->header.dword_0);
+	dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+	dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+	utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the sqe so the host controller will skip fetching the sqe.
+ *
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
+ *
+ * Returns true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+				  struct ufs_hw_queue *hwq, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct utp_transfer_req_desc *utrd;
+	__le64 cmd_desc_base_addr;
+	bool ret = false;
+	u64 addr, match;
+	u32 sq_head_slot;
+
+	mutex_lock(&hwq->sq_mutex);
+
+	ufshcd_mcq_sq_stop(hba, hwq);
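+	/* Entries between head and tail have not been fetched yet */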
+	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+	if (sq_head_slot == hwq->sq_tail_slot)
+		goto out;
+
+	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
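+	/* Match each pending entry on its command descriptor base address */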
+	while (sq_head_slot != hwq->sq_tail_slot) {
+		utrd = hwq->sqe_base_addr + sq_head_slot;
+		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+		if (addr == match) {
+			ufshcd_mcq_nullify_sqe(utrd);
+			ret = true;
+			goto out;
+		}
+
+		if (++sq_head_slot == hwq->max_entries)
+			sq_head_slot = 0;
+	}
+
+out:
+	ufshcd_mcq_sq_start(hba, hwq);
+	mutex_unlock(&hwq->sq_mutex);
+	return ret;
+}
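
For context, below is a minimal sketch of how a caller-side abort path could combine these helpers. It is not part of this patch: the function name example_mcq_abort is hypothetical, and the real ufshcd error-handling flow is more involved.

/*
 * Hypothetical illustration only; assumes the same translation unit so the
 * static helpers above are visible.
 */
static int example_mcq_abort(struct ufs_hba *hba, struct ufs_hw_queue *hwq,
			     int task_tag)
{
	/*
	 * If the SQE is still in the SQ, it has been nullified and the
	 * controller will skip it; the command never reaches the device.
	 */
	if (ufshcd_mcq_sqe_search(hba, hwq, task_tag))
		return 0;

	/*
	 * Otherwise the SQE was already fetched; ask the controller to clean
	 * up the SQ resources tied to the pending command.
	 */
	return ufshcd_mcq_sq_cleanup(hba, task_tag);
}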