Commit 9cd20d3f authored by Can Guo, committed by Martin K. Petersen

scsi: ufs: Protect PM ops and err_handler from user access through sysfs

The user layer may access sysfs nodes while system PM ops or error handling
are running, which can lead to races and other problems. Rename eh_sem to
host_sem and use it to protect PM ops and error handling from user-layer
intervention.

Link: https://lore.kernel.org/r/1610594010-7254-3-git-send-email-cang@codeaurora.org
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Acked-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent fb7afe24
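Below is a minimal, self-contained sketch of the guard pattern this change applies (illustrative only, not the ufshcd code itself; struct my_hba, some_value_show and my_shutdown are made-up names): a sysfs handler takes host_sem, bails out with -EBUSY once shutting_down is set, and releases the semaphore on every exit path, while the shutdown path sets the flag under the same semaphore.

/*
 * Illustrative sketch only -- not taken from the UFS driver. It shows the
 * host_sem/shutting_down pattern with made-up names. The semaphore would be
 * initialized once at probe time with sema_init(&hba->host_sem, 1).
 */
#include <linux/device.h>
#include <linux/semaphore.h>
#include <linux/sysfs.h>

struct my_hba {
	struct semaphore host_sem;	/* serializes sysfs vs PM/error handling */
	bool shutting_down;		/* set once shutdown has started */
	u32 cached_value;		/* stand-in for real device state */
};

/* sysfs read handler: refuse user access once shutdown is in progress */
static ssize_t some_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct my_hba *hba = dev_get_drvdata(dev);
	int ret;

	down(&hba->host_sem);
	if (hba->shutting_down) {
		ret = -EBUSY;
		goto out;
	}
	ret = sysfs_emit(buf, "0x%08X\n", hba->cached_value);
out:
	up(&hba->host_sem);
	return ret;
}
static DEVICE_ATTR_RO(some_value);

/* shutdown path: flip the flag under the semaphore, then power down */
static void my_shutdown(struct my_hba *hba)
{
	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);
	/* from here on, the sysfs handlers return -EBUSY */
}

The patch itself keeps this structure but reuses the existing error-handler semaphore: eh_sem is renamed to host_sem so that the error handler, system PM callbacks, shutdown and the sysfs nodes all serialize on a single semaphore.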
@@ -154,18 +154,29 @@ static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ahit;
int ret;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
- return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
out:
up(&hba->host_sem);
return ret;
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -174,6 +185,7 @@ static ssize_t auto_hibern8_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int timer;
int ret = 0;
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
@@ -184,9 +196,17 @@ static ssize_t auto_hibern8_store(struct device *dev,
if (timer > UFSHCI_AHIBERN8_MAX)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
- return count;
out:
up(&hba->host_sem);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(rpm_lvl);
@@ -225,12 +245,21 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
if (param_size > 8)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(hba->dev);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
if (ret) {
ret = -EINVAL;
goto out;
}
switch (param_size) {
case 1:
ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
@@ -249,6 +278,8 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
break;
}
out:
up(&hba->host_sem);
return ret;
}
@@ -591,9 +622,16 @@ static ssize_t _name##_show(struct device *dev, \
int desc_len = QUERY_DESC_MAX_SIZE; \
u8 *desc_buf; \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) \
- return -ENOMEM; \
if (!desc_buf) { \
up(&hba->host_sem); \
return -ENOMEM; \
} \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
@@ -609,10 +647,11 @@ static ssize_t _name##_show(struct device *dev, \
SD_ASCII_STD); \
if (ret < 0) \
goto out; \
ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
pm_runtime_put_sync(hba->dev); \
kfree(desc_buf); \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -651,15 +690,26 @@ static ssize_t _name##_show(struct device *dev, \
u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
if (ret) { \
ret = -EINVAL; \
goto out; \
} \
ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
out: \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -709,15 +759,26 @@ static ssize_t _name##_show(struct device *dev, \
u32 value; \
int ret; \
u8 index = 0; \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sysfs_emit(buf, "0x%08X\n", value); \
if (ret) { \
ret = -EINVAL; \
goto out; \
} \
ret = sysfs_emit(buf, "0x%08X\n", value); \
out: \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -851,13 +912,26 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
int ret;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(hba->dev);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
- return sysfs_emit(buf, "0x%08X\n", value);
if (ret) {
ret = -EINVAL;
goto out;
}
ret = sysfs_emit(buf, "0x%08X\n", value);
out:
up(&hba->host_sem);
return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
@@ -1546,11 +1546,17 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
- int err;
int err = 0;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
err = -EBUSY;
goto out;
}
value = !!value;
if (value == hba->clk_scaling.is_allowed)
goto out;
@@ -1576,7 +1582,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
out:
- return count;
up(&hba->host_sem);
return err ? err : count;
}
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
@@ -5775,9 +5782,10 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
return (!hba->is_powered || hba->shutting_down ||
hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
@@ -5847,13 +5855,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
- down(&hba->eh_sem);
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- up(&hba->eh_sem);
up(&hba->host_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -6022,7 +6030,7 @@ static void ufshcd_err_handler(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
- up(&hba->eh_sem);
up(&hba->host_sem);
}
/**
@@ -7924,10 +7932,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
- down(&hba->eh_sem);
down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
- up(&hba->eh_sem);
up(&hba->host_sem);
if (ret)
goto out;
@@ -8959,7 +8967,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
return 0;
}
- down(&hba->eh_sem);
down(&hba->host_sem);
if (!hba->is_powered)
return 0;
@@ -8992,7 +9000,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!ret)
hba->is_sys_suspended = true;
else
- up(&hba->eh_sem);
up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -9014,7 +9022,7 @@ int ufshcd_system_resume(struct ufs_hba *hba)
if (unlikely(early_suspend)) {
early_suspend = false;
- down(&hba->eh_sem);
down(&hba->host_sem);
}
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -9031,7 +9039,7 @@ int ufshcd_system_resume(struct ufs_hba *hba)
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
- up(&hba->eh_sem);
up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9123,7 +9131,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
- down(&hba->eh_sem);
down(&hba->host_sem);
hba->shutting_down = true;
up(&hba->host_sem);
if (!hba->is_powered)
goto out;
@@ -9137,7 +9148,6 @@ int ufshcd_shutdown(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
- up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9333,7 +9343,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
- sema_init(&hba->eh_sem, 1);
sema_init(&hba->host_sem, 1);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -665,6 +665,8 @@ struct ufs_hba_variant_params {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
* @shutting_down: flag to check if shutdown has been invoked
* @host_sem: semaphore used to serialize concurrent contexts
* @eh_wq: Workqueue that eh_work works on
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -761,7 +763,8 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- struct semaphore eh_sem;
bool shutting_down;
struct semaphore host_sem;
/* Work Queues */
struct workqueue_struct *eh_wq;
@@ -888,6 +891,11 @@ static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_EN;
}
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
return !hba->shutting_down;
}
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \