Commit ed975065 authored by Asutosh Das, committed by Martin K. Petersen

scsi: ufs: core: mcq: Add completion support in poll

Complete CQE requests in poll. The assumption is that several poll
completions may happen on different CPUs for the same completion queue.
Hence, spinlock protection is added.
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent f87b2c41
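
As the commit message above notes, several pollers may target the same
completion queue from different CPUs, so ufshcd_mcq_poll_cqe_lock() is a
thin wrapper that takes the new per-queue cq_lock around the existing
lock-free poll routine. Below is a minimal userspace sketch of the same
pattern (an illustration only, not kernel code: struct fake_cq and the
drain_cq_*()/poller() helpers are invented stand-ins for ufs_hw_queue,
ufshcd_mcq_poll_cqe_nolock() and ufshcd_mcq_poll_cqe_lock()):

/* Build with: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct fake_cq {
	pthread_spinlock_t cq_lock;	/* analogue of hwq->cq_lock */
	unsigned int head, tail;	/* analogue of cq_head_slot/cq_tail_slot */
};

/* Analogue of ufshcd_mcq_poll_cqe_nolock(): walks the new CQ entries and
 * advances the head pointer; racy if two pollers run it concurrently. */
static unsigned long drain_cq_nolock(struct fake_cq *cq)
{
	unsigned long completed = 0;

	while (cq->head != cq->tail) {
		cq->head++;		/* "complete" one entry */
		completed++;
	}
	return completed;
}

/* Analogue of ufshcd_mcq_poll_cqe_lock(): the per-queue lock lets any
 * number of pollers target the same completion queue safely. */
static unsigned long drain_cq_lock(struct fake_cq *cq)
{
	unsigned long completed;

	pthread_spin_lock(&cq->cq_lock);
	completed = drain_cq_nolock(cq);
	pthread_spin_unlock(&cq->cq_lock);
	return completed;
}

static void *poller(void *arg)
{
	/* Two of these run concurrently, like ufshcd_poll() on two CPUs. */
	return (void *)drain_cq_lock(arg);
}

int main(void)
{
	struct fake_cq cq = { .head = 0, .tail = 8 };
	pthread_t t1, t2;

	pthread_spin_init(&cq.cq_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t1, NULL, poller, &cq);
	pthread_create(&t2, NULL, poller, &cq);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("drained, head == tail: %d\n", cq.head == cq.tail);
	pthread_spin_destroy(&cq.cq_lock);
	return 0;
}

Keeping the drain logic in a separate _nolock() helper and adding the
locking in a small wrapper mirrors the structure of the patch below:
contexts that are already serialized can keep calling the cheap variant.
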
drivers/ufs/core/ufs-mcq.c

@@ -294,6 +294,18 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 	return completed_reqs;
 }
 
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+				       struct ufs_hw_queue *hwq)
+{
+	unsigned long completed_reqs;
+
+	spin_lock(&hwq->cq_lock);
+	completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+	spin_unlock(&hwq->cq_lock);
+
+	return completed_reqs;
+}
+
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
 	struct ufs_hw_queue *hwq;
@@ -390,6 +402,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 		hwq = &hba->uhq[i];
 		hwq->max_entries = hba->nutrs;
 		spin_lock_init(&hwq->sq_lock);
+		spin_lock_init(&hwq->cq_lock);
 	}
 
 	/* The very first HW queue serves device commands */
drivers/ufs/core/ufshcd-priv.h

@@ -75,6 +75,8 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 					 struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+				       struct ufs_hw_queue *hwq);
 
 #define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
 #define SD_ASCII_STD true
drivers/ufs/core/ufshcd.c

@@ -5461,6 +5461,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
 	struct ufs_hba *hba = shost_priv(shost);
 	unsigned long completed_reqs, flags;
 	u32 tr_doorbell;
+	struct ufs_hw_queue *hwq;
+
+	if (is_mcq_enabled(hba)) {
+		hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+	}
 
 	spin_lock_irqsave(&hba->outstanding_lock, flags);
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
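
Note the queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET indexing above: the very
first hardware queue serves device commands (see the ufs-mcq.c hunk), and
UFSHCD_MCQ_IO_QUEUE_OFFSET is 1, so blk-mq poll queue 0 maps to
hba->uhq[1], poll queue 1 to hba->uhq[2], and so on.
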
include/ufs/ufshcd.h

@@ -1086,6 +1086,7 @@ struct ufs_hba {
  * @sq_lock: serialize submission queue access
  * @cq_tail_slot: current slot to which CQ tail pointer is pointing
  * @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
  */
 struct ufs_hw_queue {
 	void __iomem *mcq_sq_head;
@@ -1103,6 +1104,7 @@ struct ufs_hw_queue {
 	spinlock_t sq_lock;
 	u32 cq_tail_slot;
 	u32 cq_head_slot;
+	spinlock_t cq_lock;
 };
 
 static inline bool is_mcq_enabled(struct ufs_hba *hba)