Commit f76c7451 authored by Arnd Bergmann

Merge tag 'scmi-fixes-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes

Arm SCMI fixes for v6.1

A bunch of fixes to handle:
1. A possible resource leak in scmi_remove(). The returned error
   value gets ignored by the driver core, which removes the device and
   frees the devm-allocated resources anyway. As a simple solution that
   is easy to backport, the driver's bind attributes are suppressed, as
   there is no need to support them. Additionally, the remove path is
   cleaned up by adding device links between the core and the protocol
   devices so that a proper and complete unbinding happens (see the
   first sketch after this list).
2. A possible spin-loop in the SCMI transmit path in case of misbehaving
   platform firmware. A timeout is added to the existing loop so that
   the SCMI stack can bail out, aborting the transmission with warnings
   (second sketch below).
3. The optional Rx channel setup: memory errors are now reported
   instead of being ignored along with the other allowed errors.
4. The use of the proper device for all the device-managed allocations
   in the virtio transport (third sketch below).
5. Incorrect deferred_tx_wq release on the error paths, fixed by using
   the devres API (devm_add_action_or_reset) to manage the release
   (fourth sketch below).
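
For fix 1, the underlying mechanism is the driver core's device-link
API. A minimal sketch of the pattern (illustrative names, not the SCMI
code itself): with DL_FLAG_AUTOREMOVE_CONSUMER the link is dropped
automatically when the consumer unbinds, and unbinding the supplier
forces its consumers to unbind first, which is what makes the core's
removal complete.

  #include <linux/device.h>

  /* Tie a consumer's bound lifetime to its supplier. */
  static int example_add_link(struct device *consumer, struct device *supplier)
  {
          struct device_link *link;

          /* The link goes away automatically when the consumer unbinds. */
          link = device_link_add(consumer, supplier,
                                 DL_FLAG_AUTOREMOVE_CONSUMER);
          if (!link)
                  return -ENODEV;

          return 0;
  }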
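
For fix 2, the pattern is a busy-wait bounded by a ktime deadline, so a
misbehaving platform can no longer wedge the loop forever. A minimal
sketch assuming a hypothetical channel_free() predicate; the actual
change (the shmem_tx_prepare() hunk below) bounds the wait at twice the
configured RX timeout:

  #include <linux/ktime.h>
  #include <linux/processor.h>

  /* Stand-in for the real shared-memory channel-status check. */
  static bool channel_free(void)
  {
          return true;
  }

  /* Spin until the channel frees up or the deadline expires. */
  static bool wait_channel_free(unsigned int timeout_ms)
  {
          ktime_t stop = ktime_add_ms(ktime_get(), 2 * timeout_ms);

          spin_until_cond(channel_free() ||
                          ktime_after(ktime_get(), stop));

          /* Tell the caller whether we succeeded or timed out. */
          return channel_free();
  }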
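
For fix 4, the point is that devres memory is freed when the device it
was allocated against unbinds, so allocations backing a channel must be
made against the device whose lifetime actually matches the channel's.
A short sketch of the idea (illustrative names):

  #include <linux/device.h>
  #include <linux/slab.h>

  /* The buffer lives exactly as long as chan_dev stays bound. */
  static void *example_alloc_chan_buf(struct device *chan_dev, size_t len)
  {
          return devm_kzalloc(chan_dev, len, GFP_KERNEL);
  }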
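
For fix 5, the devres pattern is to register a teardown action right
after a successful allocation, so every later error path and the
eventual device removal release the resource automatically. A minimal
sketch with illustrative names:

  #include <linux/device.h>
  #include <linux/workqueue.h>

  /* Teardown action run by devres when the device goes away. */
  static void example_destroy_wq(void *wq)
  {
          destroy_workqueue(wq);
  }

  static int example_setup_wq(struct device *dev)
  {
          struct workqueue_struct *wq;
          int ret;

          wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
          if (!wq)
                  return -ENOMEM;

          /* If registration fails, the action runs immediately. */
          ret = devm_add_action_or_reset(dev, example_destroy_wq, wq);
          if (ret)
                  return ret;

          return 0;
  }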

* tag 'scmi-fixes-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Fix deferred_tx_wq release on error paths
  firmware: arm_scmi: Fix devres allocation device in virtio transport
  firmware: arm_scmi: Make Rx chan_setup fail on memory errors
  firmware: arm_scmi: Make tx_prepare time out eventually
  firmware: arm_scmi: Suppress the driver's bind attributes
  firmware: arm_scmi: Cleanup the core driver removal callback

Link: https://lore.kernel.org/r/20221102140142.2758107-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents a0e21508 1eff6929
@@ -216,9 +216,20 @@ void scmi_device_destroy(struct scmi_device *scmi_dev)
 	device_unregister(&scmi_dev->dev);
 }
 
+void scmi_device_link_add(struct device *consumer, struct device *supplier)
+{
+	struct device_link *link;
+
+	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+	WARN_ON(!link);
+}
+
 void scmi_set_handle(struct scmi_device *scmi_dev)
 {
 	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+
+	if (scmi_dev->handle)
+		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
 }
 
 int scmi_protocol_register(const struct scmi_protocol *proto)

@@ -97,6 +97,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
 struct scmi_revision_info *
 scmi_revision_area_get(const struct scmi_protocol_handle *ph);
 int scmi_handle_put(const struct scmi_handle *handle);
+void scmi_device_link_add(struct device *consumer, struct device *supplier);
 struct scmi_handle *scmi_handle_get(struct device *dev);
 void scmi_set_handle(struct scmi_device *scmi_dev);
 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
@@ -117,6 +118,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
  *
  * @dev: Reference to device in the SCMI hierarchy corresponding to this
  *	 channel
+ * @rx_timeout_ms: The configured RX timeout in milliseconds.
  * @handle: Pointer to SCMI entity handle
  * @no_completion_irq: Flag to indicate that this channel has no completion
  *		       interrupt mechanism for synchronous commands.
@@ -126,6 +128,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
  */
 struct scmi_chan_info {
 	struct device *dev;
+	unsigned int rx_timeout_ms;
 	struct scmi_handle *handle;
 	bool no_completion_irq;
 	void *transport_info;
@@ -232,7 +235,7 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
 struct scmi_shared_mem;
 
 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
-		      struct scmi_xfer *xfer);
+		      struct scmi_xfer *xfer, struct scmi_chan_info *cinfo);
 u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
 			  struct scmi_xfer *xfer);

@@ -2013,6 +2013,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
 		return -ENOMEM;
 
 	cinfo->dev = dev;
+	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
 
 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
 	if (ret)
@@ -2044,8 +2045,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
 {
 	int ret = scmi_chan_setup(info, dev, prot_id, true);
 
-	if (!ret) /* Rx is optional, hence no error check */
-		scmi_chan_setup(info, dev, prot_id, false);
+	if (!ret) {
+		/* Rx is optional, report only memory errors */
+		ret = scmi_chan_setup(info, dev, prot_id, false);
+		if (ret && ret != -ENOMEM)
+			ret = 0;
+	}
 
 	return ret;
 }
@@ -2273,10 +2278,16 @@ int scmi_protocol_device_request(const struct scmi_device_id *id_table)
 			sdev = scmi_get_protocol_device(child, info,
 							id_table->protocol_id,
 							id_table->name);
-			/* Set handle if not already set: device existed */
-			if (sdev && !sdev->handle)
-				sdev->handle =
-					scmi_handle_get_from_info_unlocked(info);
+			if (sdev) {
+				/* Set handle if not already set: device existed */
+				if (!sdev->handle)
+					sdev->handle =
+						scmi_handle_get_from_info_unlocked(info);
+				/* Relink consumer and suppliers */
+				if (sdev->handle)
+					scmi_device_link_add(&sdev->dev,
+							     sdev->handle->dev);
+			}
 		} else {
 			dev_err(info->dev,
 				"Failed. SCMI protocol %d not active.\n",
@@ -2475,20 +2486,17 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
 
 static int scmi_remove(struct platform_device *pdev)
 {
-	int ret = 0, id;
+	int ret, id;
 	struct scmi_info *info = platform_get_drvdata(pdev);
 	struct device_node *child;
 
 	mutex_lock(&scmi_list_mutex);
 	if (info->users)
-		ret = -EBUSY;
-	else
-		list_del(&info->node);
+		dev_warn(&pdev->dev,
+			 "Still active SCMI users will be forcibly unbound.\n");
+	list_del(&info->node);
 	mutex_unlock(&scmi_list_mutex);
 
-	if (ret)
-		return ret;
-
 	scmi_notification_exit(&info->handle);
 
 	mutex_lock(&info->protocols_mtx);
@@ -2500,7 +2508,11 @@ static int scmi_remove(struct platform_device *pdev)
 	idr_destroy(&info->active_protocols);
 
 	/* Safe to free channels since no more users */
-	return scmi_cleanup_txrx_channels(info);
+	ret = scmi_cleanup_txrx_channels(info);
+	if (ret)
+		dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
+
+	return 0;
 }
 
 static ssize_t protocol_version_show(struct device *dev,
@@ -2571,6 +2583,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match);
 static struct platform_driver scmi_driver = {
 	.driver = {
 		   .name = "arm-scmi",
+		   .suppress_bind_attrs = true,
 		   .of_match_table = scmi_of_match,
 		   .dev_groups = versions_groups,
 		   },

@@ -36,7 +36,7 @@ static void tx_prepare(struct mbox_client *cl, void *m)
 {
 	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 
-	shmem_tx_prepare(smbox->shmem, m);
+	shmem_tx_prepare(smbox->shmem, m, smbox->cinfo);
 }
 
 static void rx_callback(struct mbox_client *cl, void *m)

@@ -498,7 +498,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
 		msg_tx_prepare(channel->req.msg, xfer);
 		ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
 	} else {
-		shmem_tx_prepare(channel->req.shmem, xfer);
+		shmem_tx_prepare(channel->req.shmem, xfer, cinfo);
 		ret = invoke_process_smt_channel(channel);
 	}
 
@@ -5,10 +5,13 @@
  * Copyright (C) 2019 ARM Ltd.
  */
 
+#include <linux/ktime.h>
 #include <linux/io.h>
 #include <linux/processor.h>
 #include <linux/types.h>
 
+#include <asm-generic/bug.h>
+
 #include "common.h"
 
 /*
@@ -30,16 +33,36 @@ struct scmi_shared_mem {
 };
 
 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
-		      struct scmi_xfer *xfer)
+		      struct scmi_xfer *xfer, struct scmi_chan_info *cinfo)
 {
+	ktime_t stop;
+
 	/*
 	 * Ideally channel must be free by now unless OS timeout last
 	 * request and platform continued to process the same, wait
 	 * until it releases the shared memory, otherwise we may endup
-	 * overwriting its response with new message payload or vice-versa
+	 * overwriting its response with new message payload or vice-versa.
+	 * Giving up anyway after twice the expected channel timeout so as
+	 * not to bail-out on intermittent issues where the platform is
+	 * occasionally a bit slower to answer.
+	 *
+	 * Note that after a timeout is detected we bail-out and carry on but
+	 * the transport functionality is probably permanently compromised:
+	 * this is just to ease debugging and avoid complete hangs on boot
+	 * due to a misbehaving SCMI firmware.
 	 */
-	spin_until_cond(ioread32(&shmem->channel_status) &
-			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+	stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
+	spin_until_cond((ioread32(&shmem->channel_status) &
+			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
+			 ktime_after(ktime_get(), stop));
+	if (!(ioread32(&shmem->channel_status) &
+	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
+		WARN_ON_ONCE(1);
+		dev_err(cinfo->dev,
+			"Timeout waiting for a free TX channel !\n");
+		return;
+	}
+
 	/* Mark channel busy + clear error */
 	iowrite32(0x0, &shmem->channel_status);
 	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,

@@ -188,7 +188,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
 	 */
 	smc_channel_lock_acquire(scmi_info, xfer);
 
-	shmem_tx_prepare(scmi_info->shmem, xfer);
+	shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
 
 	arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);

@@ -148,7 +148,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
 {
 	unsigned long flags;
 	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
-	void *deferred_wq = NULL;
 
 	/*
 	 * Prepare to wait for the last release if not already released
@@ -162,16 +161,11 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
 		vioch->shutdown_done = &vioch_shutdown_done;
 		virtio_break_device(vioch->vqueue->vdev);
-		if (!vioch->is_rx && vioch->deferred_tx_wq) {
-			deferred_wq = vioch->deferred_tx_wq;
+		if (!vioch->is_rx && vioch->deferred_tx_wq)
 			/* Cannot be kicked anymore after this...*/
 			vioch->deferred_tx_wq = NULL;
-		}
 	}
 	spin_unlock_irqrestore(&vioch->lock, flags);
 
-	if (deferred_wq)
-		destroy_workqueue(deferred_wq);
-
 	scmi_vio_channel_release(vioch);
 
 	/* Let any possibly concurrent RX path release the channel */
@@ -416,6 +410,11 @@ static bool virtio_chan_available(struct device *dev, int idx)
 	return vioch && !vioch->cinfo;
 }
 
+static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
+{
+	destroy_workqueue(deferred_tx_wq);
+}
+
 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 			     bool tx)
 {
@@ -430,6 +429,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 
 	/* Setup a deferred worker for polling. */
 	if (tx && !vioch->deferred_tx_wq) {
+		int ret;
+
 		vioch->deferred_tx_wq =
 			alloc_workqueue(dev_name(&scmi_vdev->dev),
 					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
@@ -437,6 +438,11 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 		if (!vioch->deferred_tx_wq)
 			return -ENOMEM;
 
+		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
+					       vioch->deferred_tx_wq);
+		if (ret)
+			return ret;
+
 		INIT_WORK(&vioch->deferred_tx_work,
 			  scmi_vio_deferred_tx_worker);
 	}
@@ -444,12 +450,12 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 	for (i = 0; i < vioch->max_msg; i++) {
 		struct scmi_vio_msg *msg;
 
-		msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
+		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
 		if (!msg)
 			return -ENOMEM;
 
 		if (tx) {
-			msg->request = devm_kzalloc(cinfo->dev,
+			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
 			if (!msg->request)
@@ -458,7 +464,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 			refcount_set(&msg->users, 1);
 		}
 
-		msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
+		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
 		if (!msg->input)
 			return -ENOMEM;