Commit fbc4d81a authored by Sudeep Holla

firmware: arm_scmi: refactor in preparation to support per-protocol channels

In order to support per-protocol channels if available, we need to
factor out all the mailbox channel information (Tx/Rx payload and
channel handle) out of the main SCMI instance information structure.

This patch refactors the existing channel information into a separate
scmi_chan_info structure.

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
parent 5c4ba3cc
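For orientation, here is a condensed before/after sketch of the data structures this patch touches, abridged to the mailbox-related members (the full definitions are in the hunks below):

	/* Before: mailbox state lives directly in the per-instance structure */
	struct scmi_info {
		...
		struct mbox_client cl;
		struct mbox_chan *tx_chan;
		void __iomem *tx_payload;
		...
	};

	/* After: mailbox state is factored into a per-channel descriptor that
	 * the instance merely references, so additional (per-protocol) channels
	 * can later be added without growing scmi_info.
	 */
	struct scmi_chan_info {
		struct mbox_client cl;
		struct mbox_chan *chan;
		void __iomem *payload;
		struct device *dev;
	};

	struct scmi_info {
		...
		struct scmi_chan_info *tx_cinfo;
		...
	};

	/* Mailbox callbacks receive a struct mbox_client *, so the enclosing
	 * channel descriptor is recovered with container_of():
	 */
	#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)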
@@ -90,6 +90,22 @@ struct scmi_desc {
 	int max_msg_size;
 };
 
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @payload: Transmit/Receive mailbox channel payload area
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ *	 channel
+ */
+struct scmi_chan_info {
+	struct mbox_client cl;
+	struct mbox_chan *chan;
+	void __iomem *payload;
+	struct device *dev;
+};
+
 /**
  * struct scmi_info - Structure representing a SCMI instance
  *
@@ -98,10 +114,8 @@ struct scmi_desc {
  * @handle: Instance of SCMI handle to send to clients
  * @version: SCMI revision information containing protocol version,
  *	implementation version and (sub-)vendor identification.
- * @cl: Mailbox Client
- * @tx_chan: Transmit mailbox channel
- * @tx_payload: Transmit mailbox channel payload area
  * @minfo: Message info
+ * @tx_cinfo: Reference to SCMI channel information
  * @protocols_imp: list of protocols implemented, currently maximum of
  *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
  * @node: list head
@@ -112,16 +126,14 @@ struct scmi_info {
 	const struct scmi_desc *desc;
 	struct scmi_revision_info version;
 	struct scmi_handle handle;
-	struct mbox_client cl;
-	struct mbox_chan *tx_chan;
-	void __iomem *tx_payload;
 	struct scmi_xfers_info minfo;
+	struct scmi_chan_info *tx_cinfo;
 	u8 *protocols_imp;
 	struct list_head node;
 	int users;
 };
 
-#define client_to_scmi_info(c)	container_of(c, struct scmi_info, cl)
+#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
 
 /*
@@ -204,10 +216,11 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
 {
 	u16 xfer_id;
 	struct scmi_xfer *xfer;
-	struct scmi_info *info = client_to_scmi_info(cl);
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = dev_get_drvdata(dev);
 	struct scmi_xfers_info *minfo = &info->minfo;
-	struct device *dev = info->dev;
-	struct scmi_shared_mem __iomem *mem = info->tx_payload;
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
 	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
@@ -258,8 +271,8 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 {
 	struct scmi_xfer *t = m;
-	struct scmi_info *info = client_to_scmi_info(cl);
-	struct scmi_shared_mem __iomem *mem = info->tx_payload;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
 	/* Mark channel busy + clear error */
 	iowrite32(0x0, &mem->channel_status);
@@ -338,27 +351,27 @@ void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 }
 
 static bool
-scmi_xfer_poll_done(const struct scmi_info *info, struct scmi_xfer *xfer)
+scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
 {
-	struct scmi_shared_mem *mem = info->tx_payload;
-	u16 xfer_id = MSG_XTRACT_TOKEN(le32_to_cpu(mem->msg_header));
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
 
 	if (xfer->hdr.seq != xfer_id)
 		return false;
 
-	return le32_to_cpu(mem->channel_status) &
+	return ioread32(&mem->channel_status) &
 		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
 		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 }
 
 #define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
 
-static bool scmi_xfer_done_no_timeout(const struct scmi_info *info,
+static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
 				      struct scmi_xfer *xfer, ktime_t stop)
 {
 	ktime_t __cur = ktime_get();
 
-	return scmi_xfer_poll_done(info, xfer) || ktime_after(__cur, stop);
+	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
 }
 
 /**
@@ -377,8 +390,9 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	int timeout;
 	struct scmi_info *info = handle_to_scmi_info(handle);
 	struct device *dev = info->dev;
+	struct scmi_chan_info *cinfo = info->tx_cinfo;
 
-	ret = mbox_send_message(info->tx_chan, xfer);
+	ret = mbox_send_message(cinfo->chan, xfer);
 	if (ret < 0) {
 		dev_dbg(dev, "mbox send fail %d\n", ret);
 		return ret;
@@ -390,10 +404,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	if (xfer->hdr.poll_completion) {
 		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
 
-		spin_until_cond(scmi_xfer_done_no_timeout(info, xfer, stop));
+		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
 
 		if (ktime_before(ktime_get(), stop))
-			scmi_fetch_response(xfer, info->tx_payload);
+			scmi_fetch_response(xfer, cinfo->payload);
 		else
 			ret = -ETIMEDOUT;
 	} else {
@@ -415,7 +429,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	 * Unfortunately, we have to kick the mailbox framework after we have
 	 * received our message.
 	 */
-	mbox_client_txdone(info->tx_chan, ret);
+	mbox_client_txdone(cinfo->chan, ret);
 
 	return ret;
 }
@@ -643,11 +657,11 @@ static int scmi_mailbox_check(struct device_node *np)
 	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
 }
 
-static int scmi_mbox_free_channel(struct scmi_info *info)
+static int scmi_mbox_free_channel(struct scmi_chan_info *cinfo)
 {
-	if (!IS_ERR_OR_NULL(info->tx_chan)) {
-		mbox_free_channel(info->tx_chan);
-		info->tx_chan = NULL;
+	if (!IS_ERR_OR_NULL(cinfo->chan)) {
+		mbox_free_channel(cinfo->chan);
+		cinfo->chan = NULL;
 	}
 
 	return 0;
@@ -667,7 +681,7 @@ static int scmi_remove(struct platform_device *pdev)
 	if (!ret)
 		/* Safe to free channels since no more users */
-		return scmi_mbox_free_channel(info);
+		return scmi_mbox_free_channel(info->tx_cinfo);
 
 	return ret;
 }
@@ -679,9 +693,17 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info)
 	resource_size_t size;
 	struct device *dev = info->dev;
 	struct device_node *shmem, *np = dev->of_node;
+	struct scmi_chan_info *cinfo;
 	struct mbox_client *cl;
 
-	cl = &info->cl;
+	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	info->tx_cinfo = cinfo;
+	cinfo->dev = dev;
+
+	cl = &cinfo->cl;
 	cl->dev = dev;
 	cl->rx_callback = scmi_rx_callback;
 	cl->tx_prepare = scmi_tx_prepare;
@@ -697,16 +719,16 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info)
 	}
 
 	size = resource_size(&res);
-	info->tx_payload = devm_ioremap(dev, res.start, size);
-	if (!info->tx_payload) {
+	cinfo->payload = devm_ioremap(info->dev, res.start, size);
+	if (!cinfo->payload) {
 		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
 		return -EADDRNOTAVAIL;
 	}
 
 	/* Transmit channel is first entry i.e. index 0 */
-	info->tx_chan = mbox_request_channel(cl, 0);
-	if (IS_ERR(info->tx_chan)) {
-		ret = PTR_ERR(info->tx_chan);
+	cinfo->chan = mbox_request_channel(cl, 0);
+	if (IS_ERR(cinfo->chan)) {
+		ret = PTR_ERR(cinfo->chan);
 		if (ret != -EPROBE_DEFER)
 			dev_err(dev, "failed to request SCMI Tx mailbox\n");
 		return ret;
@@ -774,7 +796,7 @@ static int scmi_probe(struct platform_device *pdev)
 	ret = scmi_base_protocol_init(handle);
 	if (ret) {
 		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
-		scmi_mbox_free_channel(info);
+		scmi_mbox_free_channel(info->tx_cinfo);
 		return ret;
 	}
...