Commit 4a5bd1a9 authored by Martin K. Petersen

Merge patch series "Prepare for upstreaming Pixel 6 and 7 UFS support"

Bart Van Assche <bvanassche@acm.org> says:

The patches in this series are a first step towards integrating
support in the upstream kernel for the UFS controller in the Pixel 6
and 7.

[mkp: resolve conflict with RPMB series]

Link: https://lore.kernel.org/r/20221208234358.252031-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 9a3a5a85 ada1e653
...@@ -531,7 +531,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) ...@@ -531,7 +531,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu( prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length); lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
prdt_length /= sizeof(struct ufshcd_sg_entry); prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev, dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n", "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
...@@ -540,7 +540,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) ...@@ -540,7 +540,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt) if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
sizeof(struct ufshcd_sg_entry) * prdt_length); ufshcd_sg_entry_size(hba) * prdt_length);
} }
} }
...@@ -1124,6 +1124,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) ...@@ -1124,6 +1124,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
return pending; return pending;
} }
/*
* Wait until all pending SCSI commands and TMFs have finished or the timeout
* has expired.
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us) u64 wait_timeout_us)
{ {
...@@ -1157,7 +1163,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ...@@ -1157,7 +1163,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
} }
spin_unlock_irqrestore(hba->host->host_lock, flags); spin_unlock_irqrestore(hba->host->host_lock, flags);
schedule(); io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) > if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) { wait_timeout_us) {
timeout = true; timeout = true;
...@@ -1228,9 +1234,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) ...@@ -1228,9 +1234,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
return ret; return ret;
} }
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) /*
* Wait until all pending SCSI commands and TMFs have finished or the timeout
* has expired.
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{ {
#define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0; int ret = 0;
/* /*
* make sure that there are no outstanding requests when * make sure that there are no outstanding requests when
...@@ -1240,7 +1251,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) ...@@ -1240,7 +1251,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
down_write(&hba->clk_scaling_lock); down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed || if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY; ret = -EBUSY;
up_write(&hba->clk_scaling_lock); up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba); ufshcd_scsi_unblock_requests(hba);
...@@ -1278,7 +1289,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) ...@@ -1278,7 +1289,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
int ret = 0; int ret = 0;
bool is_writelock = true; bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba); ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret) if (ret)
return ret; return ret;
...@@ -2411,7 +2422,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ...@@ -2411,7 +2422,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries, static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
struct scatterlist *sg_list) struct scatterlist *sg_list)
{ {
struct ufshcd_sg_entry *prd_table; struct ufshcd_sg_entry *prd;
struct scatterlist *sg; struct scatterlist *sg;
int i; int i;
...@@ -2419,11 +2430,11 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int ...@@ -2419,11 +2430,11 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length = lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((sg_entries * sizeof(struct ufshcd_sg_entry))); cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else else
lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries); lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
prd_table = lrbp->ucd_prdt_ptr; prd = lrbp->ucd_prdt_ptr;
for_each_sg(sg_list, sg, sg_entries, i) { for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg); const unsigned int len = sg_dma_len(sg);
...@@ -2437,9 +2448,10 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int ...@@ -2437,9 +2448,10 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
* indicates 4 bytes, '7' indicates 8 bytes, etc." * indicates 4 bytes, '7' indicates 8 bytes, etc."
*/ */
WARN_ONCE(len > 256 * 1024, "len = %#x\n", len); WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
prd_table[i].size = cpu_to_le32(len - 1); prd->size = cpu_to_le32(len - 1);
prd_table[i].addr = cpu_to_le64(sg->dma_address); prd->addr = cpu_to_le64(sg->dma_address);
prd_table[i].reserved = 0; prd->reserved = 0;
prd = (void *)prd + ufshcd_sg_entry_size(hba);
} }
} else { } else {
lrbp->utr_descriptor_ptr->prd_table_length = 0; lrbp->utr_descriptor_ptr->prd_table_length = 0;
...@@ -2746,10 +2758,11 @@ static void ufshcd_map_queues(struct Scsi_Host *shost) ...@@ -2746,10 +2758,11 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{ {
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
i * sizeof_utp_transfer_cmd_desc(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
i * sizeof(struct utp_transfer_cmd_desc); i * sizeof_utp_transfer_cmd_desc(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc, u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu); response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
...@@ -2757,11 +2770,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) ...@@ -2757,11 +2770,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->utr_descriptor_ptr = utrdlp + i; lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr + lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc); i * sizeof(struct utp_transfer_req_desc);
lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i); lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr; lrb->ucd_req_dma_addr = cmd_desc_element_addr;
lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
lrb->ucd_prdt_ptr = cmd_descp[i].prd_table; lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
} }
...@@ -3676,7 +3689,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) ...@@ -3676,7 +3689,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size; size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */ /* Allocate memory for UTP command descriptors */
ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size, ucdl_size,
&hba->ucdl_dma_addr, &hba->ucdl_dma_addr,
...@@ -3770,7 +3783,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) ...@@ -3770,7 +3783,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset = prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table); offsetof(struct utp_transfer_cmd_desc, prd_table);
cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr; cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) { for (i = 0; i < hba->nutrs; i++) {
...@@ -9766,6 +9779,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) ...@@ -9766,6 +9779,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev; hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT; hba->nop_out_timeout = NOP_OUT_TIMEOUT;
ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head); INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock); spin_lock_init(&hba->outstanding_lock);
...@@ -10145,11 +10159,6 @@ static int __init ufshcd_core_init(void) ...@@ -10145,11 +10159,6 @@ static int __init ufshcd_core_init(void)
{ {
int ret; int ret;
/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
static_assert(sizeof(struct utp_transfer_cmd_desc) ==
2 * ALIGNED_UPIU_SIZE +
SG_ALL * sizeof(struct ufshcd_sg_entry));
ufs_debugfs_init(); ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
......
...@@ -124,3 +124,7 @@ config SCSI_UFS_EXYNOS ...@@ -124,3 +124,7 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC. Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N. If unsure, say N.
config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
bool
default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO
...@@ -755,6 +755,7 @@ struct ufs_hba_monitor { ...@@ -755,6 +755,7 @@ struct ufs_hba_monitor {
* @vops: pointer to variant specific operations * @vops: pointer to variant specific operations
* @vps: pointer to variant specific parameters * @vps: pointer to variant specific parameters
* @priv: pointer to variant specific private data * @priv: pointer to variant specific private data
* @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
* @irq: Irq number of the controller * @irq: Irq number of the controller
* @is_irq_enabled: whether or not the UFS controller interrupt is enabled. * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
* @dev_ref_clk_freq: reference clock frequency * @dev_ref_clk_freq: reference clock frequency
...@@ -878,6 +879,9 @@ struct ufs_hba { ...@@ -878,6 +879,9 @@ struct ufs_hba {
const struct ufs_hba_variant_ops *vops; const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps; struct ufs_hba_variant_params *vps;
void *priv; void *priv;
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
size_t sg_entry_size;
#endif
unsigned int irq; unsigned int irq;
bool is_irq_enabled; bool is_irq_enabled;
enum ufs_ref_clk_freq dev_ref_clk_freq; enum ufs_ref_clk_freq dev_ref_clk_freq;
...@@ -981,6 +985,32 @@ struct ufs_hba { ...@@ -981,6 +985,32 @@ struct ufs_hba {
bool complete_put; bool complete_put;
}; };
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
/*
 * Size of one PRDT entry for this host. Variant drivers (e.g. Exynos with
 * inline crypto) may append fields after struct ufshcd_sg_entry, so the
 * effective entry size is stored per-hba rather than taken from sizeof().
 */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return hba->sg_entry_size;
}

/*
 * Set the per-hba PRDT entry size. Must be at least
 * sizeof(struct ufshcd_sg_entry); a smaller value would truncate the
 * standard fields, hence the WARN_ON_ONCE sanity check.
 */
static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
{
	WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
	hba->sg_entry_size = sg_entry_size;
}
#else
/* Fixed-size configuration: every PRDT entry is exactly the standard struct. */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return sizeof(struct ufshcd_sg_entry);
}

/*
 * In the fixed-size configuration only the standard entry size is legal;
 * any other value is rejected at compile time via BUILD_BUG_ON. Implemented
 * as a macro (not an inline function) so the size argument can be a
 * compile-time constant expression for BUILD_BUG_ON.
 */
#define ufshcd_set_sg_entry_size(hba, sg_entry_size) \
	({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
/*
 * Total size of one UTP Transfer Command Descriptor for this host:
 * the fixed header (command + response UPIU areas; prd_table is a
 * flexible array member, so sizeof() covers only the header) plus
 * SG_ALL PRDT entries of the host's (possibly variant-extended) size.
 */
static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
{
	return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
}
/* Returns true if clocks can be gated. Otherwise false */ /* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{ {
......
...@@ -423,18 +423,23 @@ struct ufshcd_sg_entry { ...@@ -423,18 +423,23 @@ struct ufshcd_sg_entry {
__le64 addr; __le64 addr;
__le32 reserved; __le32 reserved;
__le32 size; __le32 size;
/*
* followed by variant-specific fields if
* CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
*/
}; };
/** /**
* struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD) * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
* @command_upiu: Command UPIU Frame address * @command_upiu: Command UPIU Frame address
* @response_upiu: Response UPIU Frame address * @response_upiu: Response UPIU Frame address
* @prd_table: Physical Region Descriptor * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
* ufshcd_sg_entry's. Variant-specific fields may be present after each.
*/ */
struct utp_transfer_cmd_desc { struct utp_transfer_cmd_desc {
u8 command_upiu[ALIGNED_UPIU_SIZE]; u8 command_upiu[ALIGNED_UPIU_SIZE];
u8 response_upiu[ALIGNED_UPIU_SIZE]; u8 response_upiu[ALIGNED_UPIU_SIZE];
struct ufshcd_sg_entry prd_table[SG_ALL]; u8 prd_table[];
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment