Commit 69a6c269, authored by Bart Van Assche, committed by Martin K. Petersen

scsi: ufs: Use blk_{get,put}_request() to allocate and free TMFs

Manage TMF tags with blk_{get,put}_request() instead of
ufshcd_get_tm_free_slot() / ufshcd_put_tm_slot(). Store a per-request
completion pointer in request.end_io_data instead of using a waitqueue to
report TMF completion.

Cc: Can Guo <cang@codeaurora.org>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Avri Altman <avri.altman@wdc.com>
Cc: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20191209181309.196233-3-bvanassche@acm.org
Tested-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7252a360
...@@ -645,40 +645,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) ...@@ -645,40 +645,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
} }
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int slot;

	if (!free_slot)
		return false;

	/*
	 * Another context may claim the bit between the scan and the
	 * locked set, so retry until a bit is won or none remain free.
	 */
	for (;;) {
		slot = find_first_zero_bit(&hba->tm_slots_in_use,
					   hba->nutmrs);
		if (slot >= hba->nutmrs)
			return false;
		if (!test_and_set_bit_lock(slot, &hba->tm_slots_in_use))
			break;
	}

	*free_slot = slot;
	return true;
}
/**
 * ufshcd_put_tm_slot - release a task management slot
 * @hba: per adapter instance
 * @slot: slot index previously returned by ufshcd_get_tm_free_slot()
 *
 * Clears the in-use bit with release (unlock) semantics so the slot
 * becomes visible as free to concurrent allocators.
 */
static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
/** /**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* @hba: per adapter instance * @hba: per adapter instance
...@@ -5570,6 +5536,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) ...@@ -5570,6 +5536,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
return retval; return retval;
} }
/*
 * Context passed from ufshcd_tmc_handler() to the ufshcd_compl_tm()
 * iterator callback via blk_mq_tagset_busy_iter().
 */
struct ctm_info {
struct ufs_hba *hba;	/* owning host controller instance */
unsigned long pending;	/* TM doorbell snapshot; set bits are still outstanding */
unsigned int ncpl;	/* count of TMF requests completed by this pass */
};
/*
 * ufshcd_compl_tm - blk_mq_tagset_busy_iter() callback for TMF completion
 *
 * Completes every in-flight TMF request whose doorbell bit has been
 * cleared by the controller; requests still pending are left alone.
 * Always returns true so iteration continues over all busy tags.
 */
static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
{
	struct ctm_info *const ci = priv;

	WARN_ON_ONCE(reserved);
	/* A set doorbell bit means the controller has not finished yet. */
	if (!test_bit(req->tag, &ci->pending)) {
		struct completion *done = req->end_io_data;

		ci->ncpl++;
		if (done)
			complete(done);
	}
	return true;
}
/** /**
* ufshcd_tmc_handler - handle task management function completion * ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance * @hba: per adapter instance
...@@ -5580,16 +5567,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) ...@@ -5580,16 +5567,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
*/ */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{ {
u32 tm_doorbell; struct request_queue *q = hba->tmf_queue;
struct ctm_info ci = {
.hba = hba,
.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
};
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
if (hba->tm_condition) {
wake_up(&hba->tm_wq);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
} }
/** /**
...@@ -5695,7 +5680,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) ...@@ -5695,7 +5680,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
struct utp_task_req_desc *treq, u8 tm_function) struct utp_task_req_desc *treq, u8 tm_function)
{ {
struct request_queue *q = hba->tmf_queue;
struct Scsi_Host *host = hba->host; struct Scsi_Host *host = hba->host;
DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
unsigned long flags; unsigned long flags;
int free_slot, task_tag, err; int free_slot, task_tag, err;
...@@ -5704,7 +5692,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ...@@ -5704,7 +5692,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely, * Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT. * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/ */
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_hold(hba, false); ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags); spin_lock_irqsave(host->host_lock, flags);
...@@ -5730,10 +5721,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ...@@ -5730,10 +5721,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
/* wait until the task management command is completed */ /* wait until the task management command is completed */
err = wait_event_timeout(hba->tm_wq, err = wait_for_completion_io_timeout(&wait,
test_bit(free_slot, &hba->tm_condition),
msecs_to_jiffies(TM_CMD_TIMEOUT)); msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) { if (!err) {
/*
* Make sure that ufshcd_compl_tm() does not trigger a
* use-after-free.
*/
req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function); __func__, tm_function);
...@@ -5752,9 +5747,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ...@@ -5752,9 +5747,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
__clear_bit(free_slot, &hba->outstanding_tasks); __clear_bit(free_slot, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags); spin_unlock_irqrestore(hba->host->host_lock, flags);
clear_bit(free_slot, &hba->tm_condition); blk_put_request(req);
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
ufshcd_release(hba); ufshcd_release(hba);
return err; return err;
...@@ -8219,6 +8212,8 @@ void ufshcd_remove(struct ufs_hba *hba) ...@@ -8219,6 +8212,8 @@ void ufshcd_remove(struct ufs_hba *hba)
{ {
ufs_bsg_remove(hba); ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev); ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue); blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host); scsi_remove_host(hba->host);
/* disable interrupts */ /* disable interrupts */
...@@ -8298,6 +8293,18 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) ...@@ -8298,6 +8293,18 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
} }
EXPORT_SYMBOL(ufshcd_alloc_host); EXPORT_SYMBOL(ufshcd_alloc_host);
/*
 * This function exists because blk_mq_alloc_tag_set() requires a
 * .queue_rq implementation. TMF requests are used only for their tags
 * (allocated with blk_get_request()) and are never dispatched through
 * the block layer, so reaching this callback indicates a bug.
 */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
WARN_ON_ONCE(true);
return BLK_STS_NOTSUPP;
}
/* Minimal ops table for the TMF tag set; see ufshcd_queue_tmf(). */
static const struct blk_mq_ops ufshcd_tmf_ops = {
.queue_rq = ufshcd_queue_tmf,
};
/** /**
* ufshcd_init - Driver initialization routine * ufshcd_init - Driver initialization routine
* @hba: per-adapter instance * @hba: per-adapter instance
...@@ -8367,10 +8374,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8367,10 +8374,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false; hba->max_pwr_info.is_valid = false;
/* Initailize wait queue for task management */
init_waitqueue_head(&hba->tm_wq);
init_waitqueue_head(&hba->tm_tag_wq);
/* Initialize work queues */ /* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
...@@ -8422,6 +8425,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8422,6 +8425,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host; goto out_remove_scsi_host;
} }
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
.flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
goto free_cmd_queue;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
}
/* Reset the attached device */ /* Reset the attached device */
ufshcd_vops_device_reset(hba); ufshcd_vops_device_reset(hba);
...@@ -8431,7 +8449,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8431,7 +8449,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n"); dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba); ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba); ufshcd_print_host_state(hba);
goto free_cmd_queue; goto free_tmf_queue;
} }
/* /*
...@@ -8468,6 +8486,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8468,6 +8486,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0; return 0;
free_tmf_queue:
blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
free_cmd_queue: free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue); blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host: out_remove_scsi_host:
......
...@@ -493,11 +493,9 @@ struct ufs_stats { ...@@ -493,11 +493,9 @@ struct ufs_stats {
* @irq: Irq number of the controller * @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command * @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command * @uic_cmd_mutex: mutex for uic command
* @tm_wq: wait queue for task management * @tmf_tag_set: TMF tag set.
* @tm_tag_wq: wait queue for free task management slots * @tmf_queue: Used to allocate TMF tags.
* @tm_slots_in_use: bit map of task management request slots in use
* @pwr_done: completion for power mode change * @pwr_done: completion for power mode change
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states * @ufshcd_state: UFSHCD states
* @eh_flags: Error handling flags * @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits * @intr_mask: Interrupt Mask Bits
...@@ -641,10 +639,8 @@ struct ufs_hba { ...@@ -641,10 +639,8 @@ struct ufs_hba {
/* Device deviations from standard UFS device spec. */ /* Device deviations from standard UFS device spec. */
unsigned int dev_quirks; unsigned int dev_quirks;
wait_queue_head_t tm_wq; struct blk_mq_tag_set tmf_tag_set;
wait_queue_head_t tm_tag_wq; struct request_queue *tmf_queue;
unsigned long tm_condition;
unsigned long tm_slots_in_use;
struct uic_command *active_uic_cmd; struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex; struct mutex uic_cmd_mutex;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment