Commit 57b1c0ef authored by Asutosh Das, committed by Martin K. Petersen

scsi: ufs: core: mcq: Add support to allocate multiple queues

Multi-Circular Queue (MCQ) support was added in the UFSHCI v4.0 standard, in
addition to the legacy Single Doorbell mode.  MCQ mode supports multiple
submission and completion queues.  Add support to allocate and configure
these queues, and add module parameters to control how many queues of each
type are created.
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0cab4023
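As a usage sketch (assuming the built-in parameter prefix resolves to ufshcd_core, since ufshcd-core is normally built into the kernel rather than loaded as a module), the new queue counts would typically be given on the kernel command line, for example:

    ufshcd_core.rw_queues=4 ufshcd_core.read_queues=2 ufshcd_core.poll_queues=1

With 0644 permissions the values are also visible and writable under /sys/module/ufshcd_core/parameters/, although they are only consumed when the driver sizes its queues at probe time.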
drivers/ufs/core/Makefile
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
 ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
 ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
 ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
drivers/ufs/core/ufs-mcq.c (new file)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ufshcd-priv.h"

#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* Maximum number of queues the host controller supports */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);

	tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
			rw_queues;
	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	/* One queue is always set aside for device commands */
	rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	/* Whatever remains (capped by rw_queues and the CPU count) goes to rw */
	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
	return 0;
}
int ufshcd_mcq_init(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_mcq_config_nr_queues(hba);

	return ret;
}
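As a worked example of ufshcd_mcq_config_nr_queues() with hypothetical numbers (not taken from the patch): on a 4-CPU system whose controller advertises hba_maxq = 8, with the default parameters (rw_queues unset, read_queues = 0, poll_queues = 1), one queue is reserved for device commands, leaving rem = 7. The poll branch claims 1 (rem = 6), reads claim none, and rw_queues falls back to num_possible_cpus() = 4, so HCTX_TYPE_DEFAULT receives min3(6, 4, 4) = 4. host->nr_hw_queues therefore becomes 4 + 0 + 1 = 5, and hba->nr_hw_queues = 6 once the device-command queue is added back.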
drivers/ufs/core/ufshcd-priv.h
@@ -61,6 +61,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		       enum flag_idn idn, u8 index, bool *flag_res);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
+int ufshcd_mcq_init(struct ufs_hba *hba);

 #define SD_ASCII_STD true
 #define SD_RAW false
drivers/ufs/core/ufshcd.c
@@ -8294,6 +8294,11 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 	return ret;
 }
 
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
+{
+	return ufshcd_mcq_init(hba);
+}
+
 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 {
 	int ret;
@@ -8333,6 +8338,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 	if (ret)
 		return ret;
 	if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+		ret = ufshcd_alloc_mcq(hba);
+		if (ret) {
+			/* Continue with SDB mode */
+			use_mcq_mode = false;
+			dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+				ret);
+		}
 		ret = scsi_add_host(host, hba->dev);
 		if (ret) {
 			dev_err(hba->dev, "scsi_add_host failed\n");
include/ufs/ufshcd.h
@@ -840,6 +840,8 @@ struct ufs_hba_monitor {
  *	ee_ctrl_mask
  * @luns_avail: number of regular and well known LUNs supported by the UFS
  *	device
+ * @nr_hw_queues: number of hardware queues configured
+ * @nr_queues: number of Queues of different queue types
  * @complete_put: whether or not to call ufshcd_rpm_put() from inside
  *	ufshcd_resume_complete()
  * @ext_iid_sup: is EXT_IID is supported by UFSHC
@@ -994,6 +996,8 @@ struct ufs_hba {
 	u32 debugfs_ee_rate_limit_ms;
 #endif
 	u32 luns_avail;
+	unsigned int nr_hw_queues;
+	unsigned int nr_queues[HCTX_MAX_TYPES];
 	bool complete_put;
 	bool ext_iid_sup;
 	bool scsi_host_added;
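The nr_queues[] counts added above are indexed by blk-mq hctx type and are ultimately meant to size the SCSI host's queue maps. Purely as a hedged sketch of the usual blk-mq pattern (not code from this commit; the function name is hypothetical and queue_offset bookkeeping is omitted), the wiring would look roughly like:

static void example_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i;

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		/* Hand each hctx type the queue count computed above */
		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		blk_mq_map_queues(map);	/* default CPU-to-queue spreading */
	}
}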