Commit 7224c806 authored by Asutosh Das, committed by Martin K. Petersen

scsi: ufs: core: mcq: Calculate queue depth

The UFS device reports the queue depth it supports via bQueueDepth, which has
a maximum value of 256.  The host controller (HC) defines MAC (Max Active
Commands), the maximum number of commands that can be in flight to the UFS
device.  Calculate and configure nutrs based on both of these values.
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c263b4ef
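
As a quick illustration of the calculation described in the commit message, here is a minimal stand-alone sketch of the clamping rule. The helper name and the example values are hypothetical, not kernel code:

```c
#include <stdio.h>

/*
 * Hypothetical illustration of the rule this patch applies: the queue
 * depth (nutrs) may not exceed either the host controller's MAC or the
 * device's bQueueDepth, so the smaller of the two wins.
 */
static int decide_queue_depth(int hba_mac, int bqueuedepth)
{
        return hba_mac < bqueuedepth ? hba_mac : bqueuedepth;
}

int main(void)
{
        /* e.g. an HC with MAC = 64 and a device reporting bQueueDepth = 32 */
        printf("nutrs = %d\n", decide_queue_depth(64, 32)); /* prints 32 */
        return 0;
}
```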
drivers/ufs/core/ufs-mcq.c
@@ -19,6 +19,9 @@
 #define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
 #define UFS_MCQ_MIN_POLL_QUEUES 0
 
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
         return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -67,6 +70,38 @@ module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
 MODULE_PARM_DESC(poll_queues,
                  "Number of poll queues used for r/w. Default value is 1");
 
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Returns queue depth on success; a negative error code on failure.
+ *
+ * MAC - Max Active Commands of the Host Controller (HC).
+ * The HC won't send more than this many commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and the UFS device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+        int mac;
+
+        /* Mandatory to implement get_hba_mac() */
+        mac = ufshcd_mcq_vops_get_hba_mac(hba);
+        if (mac < 0) {
+                dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+                return mac;
+        }
+
+        WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+        /*
+         * The max. value of bqueuedepth is 256; mac is host dependent.
+         * It is mandatory for the UFS device to define bQueueDepth if
+         * the shared queueing architecture is enabled.
+         */
+        return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
 {
         int i;
drivers/ufs/core/ufshcd-priv.h
@@ -62,6 +62,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                       enum flag_idn idn, u8 index, bool *flag_res);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
@@ -238,6 +239,14 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
         return -EOPNOTSUPP;
 }
 
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+        if (hba->vops && hba->vops->get_hba_mac)
+                return hba->vops->get_hba_mac(hba);
+
+        return -EOPNOTSUPP;
+}
+
 extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /**
drivers/ufs/core/ufshcd.c
@@ -7887,6 +7887,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
         /* getting Specification Version in big endian format */
         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
                                  desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+        dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
         b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
 
         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -8296,7 +8297,21 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 
 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
 {
-        return ufshcd_mcq_init(hba);
+        int ret;
+        int old_nutrs = hba->nutrs;
+
+        ret = ufshcd_mcq_decide_queue_depth(hba);
+        if (ret < 0)
+                return ret;
+
+        hba->nutrs = ret;
+        ret = ufshcd_mcq_init(hba);
+        if (ret) {
+                hba->nutrs = old_nutrs;
+                return ret;
+        }
+
+        return 0;
 }
 
 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
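
For context, a hedged sketch of the kind of call site that invokes ufshcd_alloc_mcq() during device initialization. The guard helper is_mcq_supported() and the fallback behaviour shown here are assumptions about the surrounding ufshcd.c code, not part of this patch:

```c
/*
 * Sketch only (assumed surroundings, not from this patch): during probe,
 * MCQ allocation is attempted once; on failure the driver keeps the
 * original hba->nutrs and continues with the legacy single-doorbell path.
 */
static void example_init_mcq(struct ufs_hba *hba)
{
        int ret;

        if (!is_mcq_supported(hba)) /* assumed helper */
                return;

        ret = ufshcd_alloc_mcq(hba);
        if (ret)
                dev_info(hba->dev, "MCQ not configured, err=%d\n", ret);
}
```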
drivers/ufs/host/ufs-qcom.c
@@ -1496,6 +1496,12 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
         return ret;
 }
 
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+        /* Qualcomm HC supports up to 64 */
+        return MAX_SUPP_MAC;
+}
+
 /*
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -1521,6 +1527,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
         .program_key = ufs_qcom_ice_program_key,
         .reinit_notify = ufs_qcom_reinit_notify,
         .mcq_config_resource = ufs_qcom_mcq_config_resource,
+        .get_hba_mac = ufs_qcom_get_hba_mac,
 };
 
 /**
drivers/ufs/host/ufs-qcom.h
@@ -16,6 +16,7 @@
 #define HBRN8_POLL_TOUT_MS 100
 #define DEFAULT_CLK_RATE_HZ 1000000
 #define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
 
 #define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
 #define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
include/ufs/ufs.h
@@ -617,6 +617,8 @@ struct ufs_dev_info {
         u8 *model;
         u16 wspecversion;
         u32 clk_gating_wait_us;
+        /* Stores the queue depth supported by the UFS device */
+        u8 bqueuedepth;
 
         /* UFS HPB related flag */
         bool hpb_enabled;
include/ufs/ufshcd.h
@@ -301,6 +301,7 @@ struct ufs_pwr_mode_info {
  * @event_notify: called to notify important events
  * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
  * @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor-specific MAC (Max Active Commands) value; mandatory for MCQ mode
  */
 struct ufs_hba_variant_ops {
         const char *name;
@@ -341,6 +342,7 @@ struct ufs_hba_variant_ops {
                              enum ufs_event_type evt, void *data);
         void (*reinit_notify)(struct ufs_hba *);
         int (*mcq_config_resource)(struct ufs_hba *hba);
+        int (*get_hba_mac)(struct ufs_hba *hba);
 };
 
 /* clock gating state */
include/ufs/ufshci.h
@@ -57,6 +57,7 @@ enum {
         REG_UFS_CCAP = 0x100,
         REG_UFS_CRYPTOCAP = 0x104,
 
+        REG_UFS_MCQ_CFG = 0x380,
         UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
 };
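
MCQ_CFG_MAC_MASK and REG_UFS_MCQ_CFG are introduced above but have no consumer in this diff. As a hedged sketch (the function name is hypothetical, and whether the field stores N or N - 1 depends on the UFSHCI MCQ spec), this is roughly how a GENMASK-defined field like this is typically programmed with the decided queue depth:

```c
#include <linux/bitfield.h>

/*
 * Hypothetical sketch, not part of this patch: write the chosen number of
 * active commands into the MAC field (bits 16:8) of the MCQ config register.
 */
static void example_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
        u32 val;

        val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
        val &= ~MCQ_CFG_MAC_MASK;
        /* Encoding of the field (N vs. N - 1) is assumed to be N here. */
        val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
        ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
```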