Commit 38135535 authored by Subhash Jadavani, committed by Martin K. Petersen

scsi: ufs: add reference counting for scsi block requests

Currently we call scsi_block_requests()/scsi_unblock_requests() whenever
we want to block/unblock SCSI requests, but since there is no reference
counting, nesting these calls can leave us in an undesired state.
Consider the following call-flow sequence:

1. func1() calls scsi_block_requests() but calls func2() before
   calling scsi_unblock_requests()
2. func2() calls scsi_block_requests()
3. func2() calls scsi_unblock_requests()
4. func1() calls scsi_unblock_requests()

As there is no reference counting, SCSI requests get unblocked after
step #3 instead of only after step #4. Although we may not have seen
failures from this yet, we might run into failures in the future. The
better solution is to add reference counting.
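
For illustration, here is a minimal, self-contained userspace sketch of
the same reference-counting scheme. block_requests(), unblock_requests()
and block_reqs_cnt are hypothetical stand-ins for the kernel helpers,
using C11 atomics in place of the kernel's atomic_t API; only the first
blocker and the last unblocker take effect:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int block_reqs_cnt;

static void block_requests(void)
{
	/* Only the 0 -> 1 transition actually blocks. */
	if (atomic_fetch_add(&block_reqs_cnt, 1) == 0)
		printf("blocked\n");	/* stand-in for scsi_block_requests() */
}

static void unblock_requests(void)
{
	/* Only the 1 -> 0 transition actually unblocks. */
	if (atomic_fetch_sub(&block_reqs_cnt, 1) == 1)
		printf("unblocked\n");	/* stand-in for scsi_unblock_requests() */
}

int main(void)
{
	block_requests();	/* #1: func1(), count 0 -> 1, blocks        */
	block_requests();	/* #2: func2(), count 1 -> 2, no-op         */
	unblock_requests();	/* #3: func2(), count 2 -> 1, stays blocked */
	unblock_requests();	/* #4: func1(), count 1 -> 0, unblocks      */
	return 0;
}

With plain scsi_block_requests()/scsi_unblock_requests() calls instead,
step #3 would already have unblocked the host.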
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent b334456e
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -264,6 +264,18 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+		scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+		scsi_block_requests(hba->host);
+}
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {
@@ -1074,12 +1086,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
-		scsi_unblock_requests(hba->host);
+		ufshcd_scsi_unblock_requests(hba);
 	}
 
 	return ret;
@@ -1088,7 +1100,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
 	up_write(&hba->clk_scaling_lock);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -1460,7 +1472,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -1516,7 +1528,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
@@ -5298,7 +5310,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 }
@@ -5400,7 +5412,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 	/* handle fatal errors only when link is functional */
 	if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 		/* block commands from scsi mid-layer */
-		scsi_block_requests(hba->host);
+		ufshcd_scsi_block_requests(hba);
 		hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
@@ -8032,7 +8044,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
+	atomic_set(&hba->scsi_block_reqs_cnt, 0);
 	/*
 	 * We are assuming that device wasn't put in sleep/power-down
 	 * state exclusively during the boot stage before kernel.
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -499,6 +499,7 @@ struct ufs_stats {
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -699,6 +700,7 @@ struct ufs_hba {
 
 	struct rw_semaphore clk_scaling_lock;
 	struct ufs_desc_size desc_size;
+	atomic_t scsi_block_reqs_cnt;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */