Commit 729d3530 authored by Lukasz Luba's avatar Lukasz Luba Committed by Sudeep Holla

drivers: firmware: scmi: Extend SCMI transport layer by trace events

The SCMI transport layer communicates via mailboxes and shared memory with
firmware running on a microcontroller. It is platform specific how long it
takes to pass a SCMI message. The most sensitive requests are coming from
CPUFreq subsystem, which might be used by the scheduler.
Thus, there is a need to measure these delays and capture anomalies.
This change introduces trace events wrapped around transfer code.

According to Jim's suggestion, a unique transfer_id is used to distinguish
similar entries which might have the same message id, protocol id and
sequence number. This is the case when there are timeouts in some transfers.
Suggested-by: Jim Quinlan <james.quinlan@broadcom.com>
Signed-off-by: Lukasz Luba <lukasz.luba@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
parent 257d0e20
...@@ -81,6 +81,7 @@ struct scmi_msg { ...@@ -81,6 +81,7 @@ struct scmi_msg {
/** /**
* struct scmi_xfer - Structure representing a message flow * struct scmi_xfer - Structure representing a message flow
* *
* @transfer_id: Unique ID for debug & profiling purpose
* @hdr: Transmit message header * @hdr: Transmit message header
* @tx: Transmit message * @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store * @rx: Receive message, the buffer should be pre-allocated to store
...@@ -90,6 +91,7 @@ struct scmi_msg { ...@@ -90,6 +91,7 @@ struct scmi_msg {
* @async: pointer to delayed response message received event completion * @async: pointer to delayed response message received event completion
*/ */
struct scmi_xfer { struct scmi_xfer {
int transfer_id;
struct scmi_msg_hdr hdr; struct scmi_msg_hdr hdr;
struct scmi_msg tx; struct scmi_msg tx;
struct scmi_msg rx; struct scmi_msg rx;
......
...@@ -29,6 +29,9 @@ ...@@ -29,6 +29,9 @@
#include "common.h" #include "common.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
#define MSG_ID_MASK GENMASK(7, 0) #define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8) #define MSG_TYPE_MASK GENMASK(9, 8)
...@@ -61,6 +64,8 @@ enum scmi_error_codes { ...@@ -61,6 +64,8 @@ enum scmi_error_codes {
static LIST_HEAD(scmi_list); static LIST_HEAD(scmi_list);
/* Protection for the entire list */ /* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex); static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
/** /**
* struct scmi_xfers_info - Structure to manage transfer information * struct scmi_xfers_info - Structure to manage transfer information
...@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, ...@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
xfer = &minfo->xfer_block[xfer_id]; xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id; xfer->hdr.seq = xfer_id;
reinit_completion(&xfer->done); reinit_completion(&xfer->done);
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
return xfer; return xfer;
} }
...@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) ...@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_fetch_response(xfer, mem); scmi_fetch_response(xfer, mem);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
msg_type);
if (msg_type == MSG_TYPE_DELAYED_RESP) if (msg_type == MSG_TYPE_DELAYED_RESP)
complete(xfer->async_done); complete(xfer->async_done);
else else
...@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) ...@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (unlikely(!cinfo)) if (unlikely(!cinfo))
return -EINVAL; return -EINVAL;
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
ret = mbox_send_message(cinfo->chan, xfer); ret = mbox_send_message(cinfo->chan, xfer);
if (ret < 0) { if (ret < 0) {
dev_dbg(dev, "mbox send fail %d\n", ret); dev_dbg(dev, "mbox send fail %d\n", ret);
...@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) ...@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
*/ */
mbox_client_txdone(cinfo->chan, ret); mbox_client_txdone(cinfo->chan, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.status);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment