Commit 4682abfa authored by Asutosh Das's avatar Asutosh Das Committed by Martin K. Petersen

scsi: ufs: core: mcq: Allocate memory for MCQ mode

To read the bqueuedepth, the device descriptor is fetched in Single
Doorbell Mode. This allocated memory may not be enough for MCQ mode because
the number of tags supported in MCQ mode may be larger than in SDB mode.
Hence, release the memory allocated in SDB mode and allocate memory for MCQ
mode operation.  Define the UFS hardware queue and Completion Queue Entry.
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7224c806
@@ -149,14 +149,69 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return 0; return 0;
} }
/**
 * ufshcd_mcq_memory_alloc - allocate the SQ/CQ rings for every MCQ hw queue
 * @hba: per-adapter instance
 *
 * For each hardware queue, allocate a DMA-coherent submission queue ring
 * (one UTP transfer request descriptor per slot) and a completion queue
 * ring (one struct cq_entry per slot), sized by the queue's max_entries.
 * Allocations are device-managed (dmam_*), so they are released
 * automatically when the device is detached.
 *
 * Return: 0 on success; -ENOMEM if any ring allocation fails.
 */
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		/*
		 * Test the returned CPU pointer, not the DMA handle: a DMA
		 * address of 0 can be valid on some platforms, so the handle
		 * is not a reliable failure indicator.
		 */
		if (!hwq->sqe_base_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_base_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}
int ufshcd_mcq_init(struct ufs_hba *hba) int ufshcd_mcq_init(struct ufs_hba *hba)
{ {
int ret; struct ufs_hw_queue *hwq;
int ret, i;
ret = ufshcd_mcq_config_nr_queues(hba); ret = ufshcd_mcq_config_nr_queues(hba);
if (ret) if (ret)
return ret; return ret;
ret = ufshcd_vops_mcq_config_resource(hba); ret = ufshcd_vops_mcq_config_resource(hba);
if (ret)
return ret; return ret;
hba->uhq = devm_kzalloc(hba->dev,
hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
GFP_KERNEL);
if (!hba->uhq) {
dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
return -ENOMEM;
}
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->max_entries = hba->nutrs;
}
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
/* Give dev_cmd_queue the minimal number of entries */
hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
return 0;
} }
...@@ -63,6 +63,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, ...@@ -63,6 +63,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
int ufshcd_mcq_init(struct ufs_hba *hba); int ufshcd_mcq_init(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
#define SD_ASCII_STD true #define SD_ASCII_STD true
#define SD_RAW false #define SD_RAW false
......
...@@ -3719,6 +3719,14 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) ...@@ -3719,6 +3719,14 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
goto out; goto out;
} }
/*
* Skip utmrdl allocation; it may have been
* allocated during first pass and not released during
* MCQ memory allocation.
* See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
*/
if (hba->utmrdl_base_addr)
goto skip_utmrdl;
/* /*
* Allocate memory for UTP Task Management descriptors * Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1024 byte alignment of UTMRD * UFSHCI requires 1024 byte alignment of UTMRD
...@@ -3735,6 +3743,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) ...@@ -3735,6 +3743,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
goto out; goto out;
} }
skip_utmrdl:
/* Allocate memory for local reference block */ /* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev, hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb), hba->nutrs, sizeof(struct ufshcd_lrb),
...@@ -8295,6 +8304,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba) ...@@ -8295,6 +8304,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
return ret; return ret;
} }
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
size_t ucdl_size, utrdl_size;
ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
hba->ucdl_dma_addr);
utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
hba->utrdl_dma_addr);
devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba) static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{ {
int ret; int ret;
...@@ -8306,12 +8331,29 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba) ...@@ -8306,12 +8331,29 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
hba->nutrs = ret; hba->nutrs = ret;
ret = ufshcd_mcq_init(hba); ret = ufshcd_mcq_init(hba);
if (ret) { if (ret)
hba->nutrs = old_nutrs; goto err;
return ret;
/*
* Previously allocated memory for nutrs may not be enough in MCQ mode.
* Number of supported tags in MCQ mode may be larger than SDB mode.
*/
if (hba->nutrs != old_nutrs) {
ufshcd_release_sdb_queue(hba, old_nutrs);
ret = ufshcd_memory_alloc(hba);
if (ret)
goto err;
ufshcd_host_memory_configure(hba);
} }
ret = ufshcd_mcq_memory_alloc(hba);
if (ret)
goto err;
return 0; return 0;
err:
hba->nutrs = old_nutrs;
return ret;
} }
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
......
...@@ -876,6 +876,8 @@ enum ufshcd_res { ...@@ -876,6 +876,8 @@ enum ufshcd_res {
* @mcq_sup: is mcq supported by UFSHC * @mcq_sup: is mcq supported by UFSHC
* @res: array of resource info of MCQ registers * @res: array of resource info of MCQ registers
* @mcq_base: Multi circular queue registers base address * @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
* @dev_cmd_queue: Queue for issuing device management commands
*/ */
struct ufs_hba { struct ufs_hba {
void __iomem *mmio_base; void __iomem *mmio_base;
...@@ -1034,6 +1036,24 @@ struct ufs_hba { ...@@ -1034,6 +1036,24 @@ struct ufs_hba {
bool mcq_sup; bool mcq_sup;
struct ufshcd_res_info res[RES_MAX]; struct ufshcd_res_info res[RES_MAX];
void __iomem *mcq_base; void __iomem *mcq_base;
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
};
/**
 * struct ufs_hw_queue - per hardware queue structure
 * @sqe_base_addr: submission queue entry base address
 * @sqe_dma_addr: submission queue dma address
 * @cqe_base_addr: completion queue base address
 * @cqe_dma_addr: completion queue dma address
 * @max_entries: max number of slots in this hardware queue
 */
struct ufs_hw_queue {
	void *sqe_base_addr;
	dma_addr_t sqe_dma_addr;
	struct cq_entry *cqe_base_addr;
	dma_addr_t cqe_dma_addr;
	u32 max_entries;
};
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
......
...@@ -492,6 +492,28 @@ struct utp_transfer_req_desc { ...@@ -492,6 +492,28 @@ struct utp_transfer_req_desc {
__le16 prd_table_offset; __le16 prd_table_offset;
}; };
/* MCQ Completion Queue Entry */
/*
 * One 32-byte slot in an MCQ completion queue ring (size pinned by the
 * static_assert below). All fields are little-endian (__le*); the DW n
 * comments give each field's 32-bit word offset within the entry.
 * NOTE(review): field meanings mirror the UTRD response layout — confirm
 * against the UFSHCI MCQ specification.
 */
struct cq_entry {
	/* DW 0-1 */
	__le64 command_desc_base_addr;
	/* DW 2 */
	__le16 response_upiu_length;
	__le16 response_upiu_offset;
	/* DW 3 */
	__le16 prd_table_length;
	__le16 prd_table_offset;
	/* DW 4 */
	__le32 status;
	/* DW 5-7: reserved, not interpreted by this driver */
	__le32 reserved[3];
};

static_assert(sizeof(struct cq_entry) == 32);
/* /*
* UTMRD structure. * UTMRD structure.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment