Commit af568c7e authored by Bart Van Assche's avatar Bart Van Assche Committed by Martin K. Petersen

scsi: ufs: mcq: Make .get_hba_mac() optional

UFSHCI controllers that are compliant with the UFSHCI 4.0 standard report
the maximum number of supported commands in the controller capabilities
register. Use that value if .get_hba_mac == NULL.
Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20240708211716.2827751-11-bvanassche@acm.org
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 5e2053a4
...@@ -138,18 +138,26 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr); ...@@ -138,18 +138,26 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
* *
* MAC - Max. Active Command of the Host Controller (HC) * MAC - Max. Active Command of the Host Controller (HC)
* HC wouldn't send more than this commands to the device. * HC wouldn't send more than this commands to the device.
* It is mandatory to implement get_hba_mac() to enable MCQ mode.
* Calculates and adjusts the queue depth based on the depth * Calculates and adjusts the queue depth based on the depth
* supported by the HC and ufs device. * supported by the HC and ufs device.
*/ */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba) int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{ {
int mac = -EOPNOTSUPP; int mac;
if (!hba->vops || !hba->vops->get_hba_mac) if (!hba->vops || !hba->vops->get_hba_mac) {
goto err; /*
* Extract the maximum number of active transfer tasks value
mac = hba->vops->get_hba_mac(hba); * from the host controller capabilities register. This value is
* 0-based.
*/
hba->capabilities =
ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
mac++;
} else {
mac = hba->vops->get_hba_mac(hba);
}
if (mac < 0) if (mac < 0)
goto err; goto err;
...@@ -424,6 +432,12 @@ void ufshcd_mcq_enable(struct ufs_hba *hba) ...@@ -424,6 +432,12 @@ void ufshcd_mcq_enable(struct ufs_hba *hba)
} }
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable); EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
void ufshcd_mcq_disable(struct ufs_hba *hba)
{
ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
hba->mcq_enabled = false;
}
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{ {
ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA); ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
......
...@@ -64,6 +64,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); ...@@ -64,6 +64,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe); struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba); int ufshcd_mcq_init(struct ufs_hba *hba);
void ufshcd_mcq_disable(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
......
...@@ -8753,12 +8753,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ...@@ -8753,12 +8753,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
if (ret) if (ret)
return ret; return ret;
if (is_mcq_supported(hba) && !hba->scsi_host_added) { if (is_mcq_supported(hba) && !hba->scsi_host_added) {
ufshcd_mcq_enable(hba);
ret = ufshcd_alloc_mcq(hba); ret = ufshcd_alloc_mcq(hba);
if (!ret) { if (!ret) {
ufshcd_config_mcq(hba); ufshcd_config_mcq(hba);
ufshcd_mcq_enable(hba);
} else { } else {
/* Continue with SDB mode */ /* Continue with SDB mode */
ufshcd_mcq_disable(hba);
use_mcq_mode = false; use_mcq_mode = false;
dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
ret); ret);
......
...@@ -325,7 +325,9 @@ struct ufs_pwr_mode_info { ...@@ -325,7 +325,9 @@ struct ufs_pwr_mode_info {
* @event_notify: called to notify important events * @event_notify: called to notify important events
* @reinit_notify: called to notify reinit of UFSHCD during max gear switch * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources * @mcq_config_resource: called to configure MCQ platform resources
* @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode * @get_hba_mac: reports maximum number of outstanding commands supported by
* the controller. Should be implemented for UFSHCI 4.0 or later
* controllers that are not compliant with the UFSHCI 4.0 specification.
* @op_runtime_config: called to config Operation and runtime regs Pointers * @op_runtime_config: called to config Operation and runtime regs Pointers
* @get_outstanding_cqs: called to get outstanding completion queues * @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt * @config_esi: called to config Event Specific Interrupt
......
...@@ -68,6 +68,7 @@ enum { ...@@ -68,6 +68,7 @@ enum {
/* Controller capability masks */ /* Controller capability masks */
enum { enum {
MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F, MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F,
MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF,
MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00, MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00,
MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
MASK_EHSLUTRD_SUPPORTED = 0x00400000, MASK_EHSLUTRD_SUPPORTED = 0x00400000,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment