Commit b1a1b152 authored by Sudeep Holla


Merge tag 'scmi-updates-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into for-next/scmi

Arm SCMI firmware interface updates for v5.17

A couple of main additions:
- Support for an OP-TEE based SCMI transport to enable using the SCMI
  service provided by OP-TEE on some platforms
- Support for atomic SCMI transports, which enables a few SCMI transactions
  to be completed in atomic context, along with the associated refactoring
  work. It also marks the SMC and OP-TEE transports as atomic, since their
  commands are complete once the underlying call returns

Other changes involve some trace and log enhancements and a miscellaneous
bug fix.

* tag 'scmi-updates-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Add new parameter to mark_txdone
  firmware: arm_scmi: Add atomic mode support to smc transport
  firmware: arm_scmi: Add support for atomic transports
  firmware: arm_scmi: Make optee support sync_cmds_completed_on_ret
  firmware: arm_scmi: Make smc support sync_cmds_completed_on_ret
  firmware: arm_scmi: Add sync_cmds_completed_on_ret transport flag
  firmware: arm_scmi: Make smc transport use common completions
  firmware: arm_scmi: Add configurable polling mode for transports
  firmware: arm_scmi: Use new trace event scmi_xfer_response_wait
  include: trace: Add new scmi_xfer_response_wait event
  firmware: arm_scmi: Refactor message response path
  firmware: arm_scmi: Set polling timeout to max_rx_timeout_ms
  firmware: arm_scmi: Perform earlier cinfo lookup call in do_xfer
  firmware: arm_scmi: optee: Drop the support for the OPTEE shared dynamic buffer
  firmware: arm_scmi: optee: Fix missing mutex_init()
  firmware: arm_scmi: Make virtio Version_1 compliance optional
  firmware: arm_scmi: Add optee transport
  dt-bindings: arm: Add OP-TEE transport for SCMI
  firmware: arm_scmi: Review some virtio log messages
parents e783362e 94d0cd1d
...@@ -38,6 +38,9 @@ properties: ...@@ -38,6 +38,9 @@ properties:
The virtio transport only supports a single device. The virtio transport only supports a single device.
items: items:
- const: arm,scmi-virtio - const: arm,scmi-virtio
- description: SCMI compliant firmware with OP-TEE transport
items:
- const: linaro,scmi-optee
interrupts: interrupts:
description: description:
...@@ -83,6 +86,11 @@ properties: ...@@ -83,6 +86,11 @@ properties:
description: description:
SMC id required when using smc or hvc transports SMC id required when using smc or hvc transports
linaro,optee-channel-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Channel specifier required when using OP-TEE transport.
protocol@11: protocol@11:
type: object type: object
properties: properties:
...@@ -195,6 +203,12 @@ patternProperties: ...@@ -195,6 +203,12 @@ patternProperties:
minItems: 1 minItems: 1
maxItems: 2 maxItems: 2
linaro,optee-channel-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Channel specifier required when using OP-TEE transport and
protocol has a dedicated communication channel.
required: required:
- reg - reg
...@@ -226,6 +240,16 @@ else: ...@@ -226,6 +240,16 @@ else:
- arm,smc-id - arm,smc-id
- shmem - shmem
else:
if:
properties:
compatible:
contains:
const: linaro,scmi-optee
then:
required:
- linaro,optee-channel-id
examples: examples:
- | - |
firmware { firmware {
...@@ -340,7 +364,48 @@ examples: ...@@ -340,7 +364,48 @@ examples:
reg = <0x11>; reg = <0x11>;
#power-domain-cells = <1>; #power-domain-cells = <1>;
}; };
};
};
- |
firmware {
scmi {
compatible = "linaro,scmi-optee";
linaro,optee-channel-id = <0>;
#address-cells = <1>;
#size-cells = <0>;
scmi_dvfs1: protocol@13 {
reg = <0x13>;
linaro,optee-channel-id = <1>;
shmem = <&cpu_optee_lpri0>;
#clock-cells = <1>;
};
scmi_clk0: protocol@14 {
reg = <0x14>;
#clock-cells = <1>;
};
};
};
soc {
#address-cells = <2>;
#size-cells = <2>;
sram@51000000 {
compatible = "mmio-sram";
reg = <0x0 0x51000000 0x0 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0x51000000 0x10000>;
cpu_optee_lpri0: optee-sram-section@0 {
compatible = "arm,scmi-shmem";
reg = <0x0 0x80>;
};
}; };
}; };
......
...@@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX ...@@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX
If you want the ARM SCMI PROTOCOL stack to include support for a If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on mailboxes, answer Y. transport based on mailboxes, answer Y.
config ARM_SCMI_TRANSPORT_OPTEE
bool "SCMI transport based on OP-TEE service"
depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
select ARM_SCMI_HAVE_TRANSPORT
select ARM_SCMI_HAVE_SHMEM
default y
help
This enables the OP-TEE service based transport for SCMI.
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on OP-TEE SCMI service, answer Y.
config ARM_SCMI_TRANSPORT_SMC config ARM_SCMI_TRANSPORT_SMC
bool "SCMI transport based on SMC" bool "SCMI transport based on SMC"
depends on HAVE_ARM_SMCCC_DISCOVERY depends on HAVE_ARM_SMCCC_DISCOVERY
...@@ -66,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC ...@@ -66,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC
If you want the ARM SCMI PROTOCOL stack to include support for a If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on SMC, answer Y. transport based on SMC, answer Y.
config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
bool "Enable atomic mode support for SCMI SMC transport"
depends on ARM_SCMI_TRANSPORT_SMC
help
Enable support of atomic operation for SCMI SMC based transport.
If you want the SCMI SMC based transport to operate in atomic
mode, avoiding any kind of sleeping behaviour for selected
transactions on the TX path, answer Y.
Enabling atomic mode operations allows any SCMI driver using this
transport to optionally ask for atomic SCMI transactions and operate
in atomic context too, at the price of using a number of busy-waiting
primitives all over instead. If unsure say N.
config ARM_SCMI_TRANSPORT_VIRTIO config ARM_SCMI_TRANSPORT_VIRTIO
bool "SCMI transport based on VirtIO" bool "SCMI transport based on VirtIO"
depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
...@@ -77,6 +103,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO ...@@ -77,6 +103,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO
If you want the ARM SCMI PROTOCOL stack to include support for a If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on VirtIO, answer Y. transport based on VirtIO, answer Y.
config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
bool "SCMI VirtIO transport Version 1 compliance"
depends on ARM_SCMI_TRANSPORT_VIRTIO
default y
help
This enforces strict compliance with VirtIO Version 1 specification.
If you want the ARM SCMI VirtIO transport layer to refuse to work
with Legacy VirtIO backends and instead support only VirtIO Version 1
devices (or above), answer Y.
If you want instead to support also old Legacy VirtIO backends (like
the ones implemented by kvmtool) and let the core Kernel VirtIO layer
take care of the needed conversions, say N.
endif #ARM_SCMI_PROTOCOL endif #ARM_SCMI_PROTOCOL
config ARM_SCMI_POWER_DOMAIN config ARM_SCMI_POWER_DOMAIN
......
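As a reading aid for the ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE help text above, here is a condensed, hedged sketch of the busy-wait versus sleeping-lock pattern that this option selects. The helper name demo_channel_acquire is hypothetical; the primitives and fields it uses (spin_until_cond(), atomic_cmpxchg(), the inflight/shmem_lock members of struct scmi_smc) are the ones introduced by the smc.c hunk later in this diff.

static inline void demo_channel_acquire(struct scmi_smc *scmi_info,
					struct scmi_xfer *xfer)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		/* Atomic mode: never sleep, busy-wait until the channel is free */
		spin_until_cond(atomic_cmpxchg(&scmi_info->inflight,
					       INFLIGHT_NONE,
					       xfer->hdr.seq) == INFLIGHT_NONE);
	else
		/* Default mode: a mutex may sleep while waiting for the channel */
		mutex_lock(&scmi_info->shmem_lock);
}

This is only a summary of what smc_channel_lock_acquire() does in the hunk below; the actual code also tracks the owning message token so the ISR path can match responses.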
...@@ -6,6 +6,7 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o ...@@ -6,6 +6,7 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y) $(scmi-transport-y)
......
...@@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); ...@@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @dev: Reference to device in the SCMI hierarchy corresponding to this * @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel * channel
* @handle: Pointer to SCMI entity handle * @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
* interrupt mechanism for synchronous commands.
* This can be dynamically set by transports at run-time
* inside their provided .chan_setup().
* @transport_info: Transport layer related information * @transport_info: Transport layer related information
*/ */
struct scmi_chan_info { struct scmi_chan_info {
struct device *dev; struct device *dev;
struct scmi_handle *handle; struct scmi_handle *handle;
bool no_completion_irq;
void *transport_info; void *transport_info;
}; };
...@@ -373,7 +378,8 @@ struct scmi_transport_ops { ...@@ -373,7 +378,8 @@ struct scmi_transport_ops {
unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
int (*send_message)(struct scmi_chan_info *cinfo, int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer); struct scmi_xfer *xfer);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *xfer);
void (*fetch_response)(struct scmi_chan_info *cinfo, void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer); struct scmi_xfer *xfer);
void (*fetch_notification)(struct scmi_chan_info *cinfo, void (*fetch_notification)(struct scmi_chan_info *cinfo,
...@@ -402,6 +408,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, ...@@ -402,6 +408,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
* be pending simultaneously in the system. May be overridden by the * be pending simultaneously in the system. May be overridden by the
* get_max_msg op. * get_max_msg op.
* @max_msg_size: Maximum size of data per message that can be handled. * @max_msg_size: Maximum size of data per message that can be handled.
* @force_polling: Flag to force this whole transport to use SCMI core polling
* mechanism instead of completion interrupts even if available.
* @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
* synchronous-command messages are atomically
* completed on .send_message: no need to poll
* actively waiting for a response.
* Used by core internally only when polling is
* selected as a waiting for reply method: i.e.
* if a completion irq was found use that anyway.
* @atomic_enabled: Flag to indicate that this transport, which is assured not
* to sleep anywhere on the TX path, can be used in atomic mode
* when requested.
*/ */
struct scmi_desc { struct scmi_desc {
int (*transport_init)(void); int (*transport_init)(void);
...@@ -410,6 +428,9 @@ struct scmi_desc { ...@@ -410,6 +428,9 @@ struct scmi_desc {
int max_rx_timeout_ms; int max_rx_timeout_ms;
int max_msg; int max_msg;
int max_msg_size; int max_msg_size;
const bool force_polling;
const bool sync_cmds_completed_on_ret;
const bool atomic_enabled;
}; };
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
...@@ -421,6 +442,9 @@ extern const struct scmi_desc scmi_smc_desc; ...@@ -421,6 +442,9 @@ extern const struct scmi_desc scmi_smc_desc;
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc; extern const struct scmi_desc scmi_virtio_desc;
#endif #endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
extern const struct scmi_desc scmi_optee_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
......
...@@ -609,6 +609,25 @@ static inline void scmi_clear_channel(struct scmi_info *info, ...@@ -609,6 +609,25 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo); info->desc->ops->clear_channel(cinfo);
} }
static inline bool is_polling_required(struct scmi_chan_info *cinfo,
struct scmi_info *info)
{
return cinfo->no_completion_irq || info->desc->force_polling;
}
static inline bool is_transport_polling_capable(struct scmi_info *info)
{
return info->desc->ops->poll_done ||
info->desc->sync_cmds_completed_on_ret;
}
static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
struct scmi_info *info)
{
return is_polling_required(cinfo, info) &&
is_transport_polling_capable(info);
}
static void scmi_handle_notification(struct scmi_chan_info *cinfo, static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv) u32 msg_hdr, void *priv)
{ {
...@@ -724,8 +743,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph, ...@@ -724,8 +743,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
__scmi_xfer_put(&info->tx_minfo, xfer); __scmi_xfer_put(&info->tx_minfo, xfer);
} }
#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop) struct scmi_xfer *xfer, ktime_t stop)
{ {
...@@ -740,6 +757,79 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, ...@@ -740,6 +757,79 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
ktime_after(ktime_get(), stop); ktime_after(ktime_get(), stop);
} }
/**
* scmi_wait_for_message_response - A helper to group all the possible ways of
* waiting for a synchronous message response.
*
* @cinfo: SCMI channel info
* @xfer: Reference to the transfer being waited for.
*
* Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
* configuration flags like xfer->hdr.poll_completion.
*
* Return: 0 on Success, error otherwise.
*/
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct device *dev = info->dev;
int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
timeout_ms,
xfer->hdr.poll_completion);
if (xfer->hdr.poll_completion) {
/*
* Real polling is needed only if transport has NOT declared
* itself to support synchronous commands replies.
*/
if (!info->desc->sync_cmds_completed_on_ret) {
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
xfer, stop));
if (ktime_after(ktime_get(), stop)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
if (!ret) {
unsigned long flags;
/*
* Do not fetch_response if an out-of-order delayed
* response is being processed.
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
info->desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
}
} else {
/* And we wait for the response. */
if (!wait_for_completion_timeout(&xfer->done,
msecs_to_jiffies(timeout_ms))) {
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
return ret;
}
/** /**
* do_xfer() - Do one transfer * do_xfer() - Do one transfer
* *
...@@ -754,18 +844,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ...@@ -754,18 +844,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer) struct scmi_xfer *xfer)
{ {
int ret; int ret;
int timeout;
const struct scmi_protocol_instance *pi = ph_to_pi(ph); const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle); struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev; struct device *dev = info->dev;
struct scmi_chan_info *cinfo; struct scmi_chan_info *cinfo;
if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { /* Check for polling request on custom command xfers at first */
if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
dev_warn_once(dev, dev_warn_once(dev,
"Polling mode is not supported by transport.\n"); "Polling mode is not supported by transport.\n");
return -EINVAL; return -EINVAL;
} }
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo))
return -EINVAL;
/* True ONLY if also supported by transport. */
if (is_polling_enabled(cinfo, info))
xfer->hdr.poll_completion = true;
/* /*
* Initialise protocol id now from protocol handle to avoid it being * Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with * overridden by mistake (or malice) by the protocol code mangling with
...@@ -774,10 +872,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ...@@ -774,10 +872,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id = pi->proto->id; xfer->hdr.protocol_id = pi->proto->id;
reinit_completion(&xfer->done); reinit_completion(&xfer->done);
cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
if (unlikely(!cinfo))
return -EINVAL;
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion); xfer->hdr.poll_completion);
...@@ -798,41 +892,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ...@@ -798,41 +892,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret; return ret;
} }
if (xfer->hdr.poll_completion) { ret = scmi_wait_for_message_response(cinfo, xfer);
ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
if (ktime_before(ktime_get(), stop)) {
unsigned long flags;
/*
* Do not fetch_response if an out-of-order delayed
* response is being processed.
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
info->desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
} else {
ret = -ETIMEDOUT;
}
} else {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
if (!ret && xfer->hdr.status) if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status); ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone) if (info->desc->ops->mark_txdone)
info->desc->ops->mark_txdone(cinfo, ret); info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret); xfer->hdr.protocol_id, xfer->hdr.seq, ret);
...@@ -858,6 +923,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, ...@@ -858,6 +923,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
* @ph: Pointer to SCMI protocol handle * @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response * @xfer: Transfer to initiate and wait for response
* *
* Using asynchronous commands in atomic/polling mode should be avoided since
* it could cause long busy-waiting here, so ignore polling for the delayed
* response and WARN if it was requested for this command transaction since
* upper layers should refrain from issuing such kind of requests.
*
* The only other option would have been to refrain from using any asynchronous
* command even if made available, when an atomic transport is detected, and
* instead forcibly use the synchronous version (thing that can be easily
* attained at the protocol layer), but this would also have led to longer
* stalls of the channel for synchronous commands and possibly timeouts.
* (in other words there is usually a good reason if a platform provides an
* asynchronous version of a command and we should prefer to use it...just not
* when using atomic/polling mode)
*
* Return: -ETIMEDOUT in case of no delayed response, if transmit error, * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0. * return corresponding error, else if all goes well, return 0.
*/ */
...@@ -869,13 +948,25 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph, ...@@ -869,13 +948,25 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
xfer->async_done = &async_response; xfer->async_done = &async_response;
/*
* Delayed responses should not be polled, so an async command should
* not have been used when requiring an atomic/poll context; WARN and
* perform instead a sleeping wait.
* (Note Async + IgnoreDelayedResponses are sent via do_xfer)
*/
WARN_ON_ONCE(xfer->hdr.poll_completion);
ret = do_xfer(ph, xfer); ret = do_xfer(ph, xfer);
if (!ret) { if (!ret) {
if (!wait_for_completion_timeout(xfer->async_done, timeout)) if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
dev_err(ph->dev,
"timed out in delayed resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
else if (xfer->hdr.status) } else if (xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status); ret = scmi_to_linux_errno(xfer->hdr.status);
} }
}
xfer->async_done = NULL; xfer->async_done = NULL;
return ret; return ret;
...@@ -1308,6 +1399,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) ...@@ -1308,6 +1399,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
WARN_ON(ret); WARN_ON(ret);
} }
/**
* scmi_is_transport_atomic - Method to check if underlying transport for an
* SCMI instance is configured as atomic.
*
* @handle: A reference to the SCMI platform instance.
*
* Return: True if transport is configured as atomic
*/
static bool scmi_is_transport_atomic(const struct scmi_handle *handle)
{
struct scmi_info *info = handle_to_scmi_info(handle);
return info->desc->atomic_enabled &&
is_transport_polling_capable(info);
}
static inline static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{ {
...@@ -1499,6 +1606,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, ...@@ -1499,6 +1606,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (ret) if (ret)
return ret; return ret;
if (tx && is_polling_required(cinfo, info)) {
if (is_transport_polling_capable(info))
dev_info(dev,
"Enabled polling mode TX channel - prot_id:%d\n",
prot_id);
else
dev_warn(dev,
"Polling mode NOT supported by transport.\n");
}
idr_alloc: idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) { if (ret != prot_id) {
...@@ -1835,6 +1952,7 @@ static int scmi_probe(struct platform_device *pdev) ...@@ -1835,6 +1952,7 @@ static int scmi_probe(struct platform_device *pdev)
handle->version = &info->version; handle->version = &info->version;
handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put; handle->devm_protocol_put = scmi_devm_protocol_put;
handle->is_transport_atomic = scmi_is_transport_atomic;
if (desc->ops->link_supplier) { if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev); ret = desc->ops->link_supplier(dev);
...@@ -1853,6 +1971,10 @@ static int scmi_probe(struct platform_device *pdev) ...@@ -1853,6 +1971,10 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle)) if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n"); dev_err(dev, "SCMI Notifications NOT available.\n");
if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
dev_err(dev,
"Transport is not polling capable. Atomic mode not supported.\n");
/* /*
* Trigger SCMI Base protocol initialization. * Trigger SCMI Base protocol initialization.
* It's mandatory and won't be ever released/deinit until the * It's mandatory and won't be ever released/deinit until the
...@@ -1994,6 +2116,9 @@ static const struct of_device_id scmi_of_match[] = { ...@@ -1994,6 +2116,9 @@ static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif #endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif #endif
......
...@@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, ...@@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
return ret; return ret;
} }
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{ {
struct scmi_mailbox *smbox = cinfo->transport_info; struct scmi_mailbox *smbox = cinfo->transport_info;
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
#include <uapi/linux/tee.h>
#include "common.h"
#define SCMI_OPTEE_MAX_MSG_SIZE 128
enum scmi_optee_pta_cmd {
/*
* PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
*
* [out] value[0].a: Capability bit mask (enum pta_scmi_caps)
* [out] value[0].b: Extended capabilities or 0
*/
PTA_SCMI_CMD_CAPABILITIES = 0,
/*
* PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer
*
* [in] value[0].a: Channel handle
*
* Shared memory used for SCMI message/response exchange is expected
* to be already identified and bound to channel handle in both SCMI agent
* and SCMI server (OP-TEE) parts.
* The memory uses SMT header to carry SCMI meta-data (protocol ID and
* protocol message ID).
*/
PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
/*
* PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
*
* [in] value[0].a: Channel handle
* [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
*
* Shared memory used for SCMI message/response is a SMT buffer
* referenced by param[1]. It shall be 128 bytes large to fit response
* payload whatever the message payload size.
* The memory uses SMT header to carry SCMI meta-data (protocol ID and
* protocol message ID).
*/
PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
/*
* PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
*
* SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
*
* [in] value[0].a: Channel identifier
* [out] value[0].a: Returned channel handle
* [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
*/
PTA_SCMI_CMD_GET_CHANNEL = 3,
};
/*
* OP-TEE SCMI service capabilities bit flags (32bit)
*
* PTA_SCMI_CAPS_SMT_HEADER
* When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in
* shared memory buffers to carry SCMI protocol synchronisation information.
*/
#define PTA_SCMI_CAPS_NONE 0
#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
/**
* struct scmi_optee_channel - Description of an OP-TEE SCMI channel
*
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
* @tee_shm: Reference to TEE shared memory or NULL if using static shmem
* @link: Reference in agent's channel list
*/
struct scmi_optee_channel {
u32 channel_id;
u32 tee_session;
u32 caps;
struct mutex mu;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
struct tee_shm *tee_shm;
struct list_head link;
};
/**
* struct scmi_optee_agent - OP-TEE transport private data
*
* @dev: Device used for communication with TEE
* @tee_ctx: TEE context used for communication
* @caps: Supported channel capabilities
* @mu: Mutex for protection of @channel_list
* @channel_list: List of all created channels for the agent
*/
struct scmi_optee_agent {
struct device *dev;
struct tee_context *tee_ctx;
u32 caps;
struct mutex mu;
struct list_head channel_list;
};
/* There can be only one SCMI service in OP-TEE that we connect to */
static struct scmi_optee_agent *scmi_optee_private;
/* Forward reference to scmi_optee transport initialization */
static int scmi_optee_init(void);
/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
{
struct device *dev = agent->dev;
struct tee_client_device *scmi_pta = to_tee_client_device(dev);
struct tee_ioctl_open_session_arg arg = { };
int ret;
memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN);
arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
ret = tee_client_open_session(agent->tee_ctx, &arg, NULL);
if (ret < 0 || arg.ret) {
dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret);
return -EOPNOTSUPP;
}
*tee_session = arg.session;
return 0;
}
static void close_session(struct scmi_optee_agent *agent, u32 tee_session)
{
tee_client_close_session(agent->tee_ctx, tee_session);
}
static int get_capabilities(struct scmi_optee_agent *agent)
{
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
u32 caps;
u32 tee_session;
int ret;
ret = open_session(agent, &tee_session);
if (ret)
return ret;
arg.func = PTA_SCMI_CMD_CAPABILITIES;
arg.session = tee_session;
arg.num_params = 1;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
ret = tee_client_invoke_func(agent->tee_ctx, &arg, param);
close_session(agent, tee_session);
if (ret < 0 || arg.ret) {
dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret);
return -EOPNOTSUPP;
}
caps = param[0].u.value.a;
if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
return -EOPNOTSUPP;
}
agent->caps = caps;
return 0;
}
static int get_channel(struct scmi_optee_channel *channel)
{
struct device *dev = scmi_optee_private->dev;
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
int ret;
arg.func = PTA_SCMI_CMD_GET_CHANNEL;
arg.session = channel->tee_session;
arg.num_params = 1;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
param[0].u.value.a = channel->channel_id;
param[0].u.value.b = caps;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret || arg.ret) {
dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret);
return -EOPNOTSUPP;
}
/* From now on use channel identifier provided by OP-TEE SCMI service */
channel->channel_id = param[0].u.value.a;
channel->caps = caps;
return 0;
}
static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
{
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[2] = { };
int ret;
arg.session = channel->tee_session;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
if (channel->tee_shm) {
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
param[1].u.memref.shm = channel->tee_shm;
param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
arg.num_params = 2;
arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
} else {
arg.num_params = 1;
arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
}
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
channel->channel_id, ret, arg.ret);
return -EIO;
}
return 0;
}
static int scmi_optee_link_supplier(struct device *dev)
{
if (!scmi_optee_private) {
if (scmi_optee_init())
dev_dbg(dev, "Optee bus not yet ready\n");
/* Wait for optee bus */
return -EPROBE_DEFER;
}
if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
dev_err(dev, "Adding link to supplier optee device failed\n");
return -ECANCELED;
}
return 0;
}
static bool scmi_optee_chan_available(struct device *dev, int idx)
{
u32 channel_id;
return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
idx, &channel_id);
}
static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
shmem_clear_channel(channel->shmem);
}
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
struct device_node *np;
resource_size_t size;
struct resource res;
int ret;
np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
ret = -ENXIO;
goto out;
}
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(dev, "Failed to get SCMI Tx shared memory\n");
goto out;
}
size = resource_size(&res);
channel->shmem = devm_ioremap(dev, res.start, size);
if (!channel->shmem) {
dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
ret = -EADDRNOTAVAIL;
goto out;
}
ret = 0;
out:
of_node_put(np);
return ret;
}
static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
return setup_static_shmem(dev, cinfo, channel);
else
return -ENOMEM;
}
static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
{
struct scmi_optee_channel *channel;
uint32_t channel_id;
int ret;
if (!tx)
return -ENODEV;
channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return -ENOMEM;
ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id",
0, &channel_id);
if (ret)
return ret;
cinfo->transport_info = channel;
channel->cinfo = cinfo;
channel->channel_id = channel_id;
mutex_init(&channel->mu);
ret = setup_shmem(dev, cinfo, channel);
if (ret)
return ret;
ret = open_session(scmi_optee_private, &channel->tee_session);
if (ret)
goto err_free_shm;
ret = get_channel(channel);
if (ret)
goto err_close_sess;
/* Enable polling */
cinfo->no_completion_irq = true;
mutex_lock(&scmi_optee_private->mu);
list_add(&channel->link, &scmi_optee_private->channel_list);
mutex_unlock(&scmi_optee_private->mu);
return 0;
err_close_sess:
close_session(scmi_optee_private, channel->tee_session);
err_free_shm:
if (channel->tee_shm)
tee_shm_free(channel->tee_shm);
return ret;
}
static int scmi_optee_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_optee_channel *channel = cinfo->transport_info;
mutex_lock(&scmi_optee_private->mu);
list_del(&channel->link);
mutex_unlock(&scmi_optee_private->mu);
close_session(scmi_optee_private, channel->tee_session);
if (channel->tee_shm) {
tee_shm_free(channel->tee_shm);
channel->tee_shm = NULL;
}
cinfo->transport_info = NULL;
channel->cinfo = NULL;
scmi_free_channel(cinfo, data, id);
return 0;
}
static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
struct scmi_xfer *xfer)
{
if (!chan)
return NULL;
return chan->shmem;
}
static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
shmem_tx_prepare(shmem, xfer);
ret = invoke_process_smt_channel(channel);
if (ret)
mutex_unlock(&channel->mu);
return ret;
}
static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
shmem_fetch_response(shmem, xfer);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
mutex_unlock(&channel->mu);
}
static struct scmi_transport_ops scmi_optee_ops = {
.link_supplier = scmi_optee_link_supplier,
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
.send_message = scmi_optee_send_message,
.mark_txdone = scmi_optee_mark_txdone,
.fetch_response = scmi_optee_fetch_response,
.clear_channel = scmi_optee_clear_channel,
};
static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_OPTEE;
}
static int scmi_optee_service_probe(struct device *dev)
{
struct scmi_optee_agent *agent;
struct tee_context *tee_ctx;
int ret;
/* Only one SCMI OP-TEE device allowed */
if (scmi_optee_private) {
dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n");
return -EBUSY;
}
tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL);
if (IS_ERR(tee_ctx))
return -ENODEV;
agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL);
if (!agent) {
ret = -ENOMEM;
goto err;
}
agent->dev = dev;
agent->tee_ctx = tee_ctx;
INIT_LIST_HEAD(&agent->channel_list);
mutex_init(&agent->mu);
ret = get_capabilities(agent);
if (ret)
goto err;
/* Ensure agent resources are all visible before scmi_optee_private is */
smp_mb();
scmi_optee_private = agent;
return 0;
err:
tee_client_close_context(tee_ctx);
return ret;
}
static int scmi_optee_service_remove(struct device *dev)
{
struct scmi_optee_agent *agent = scmi_optee_private;
if (!scmi_optee_private)
return -EINVAL;
if (!list_empty(&scmi_optee_private->channel_list))
return -EBUSY;
/* Ensure cleared reference is visible before resources are released */
smp_store_mb(scmi_optee_private, NULL);
tee_client_close_context(agent->tee_ctx);
return 0;
}
static const struct tee_client_device_id scmi_optee_service_id[] = {
{
UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e,
0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99)
},
{ }
};
MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
static struct tee_client_driver scmi_optee_driver = {
.id_table = scmi_optee_service_id,
.driver = {
.name = "scmi-optee",
.bus = &tee_bus_type,
.probe = scmi_optee_service_probe,
.remove = scmi_optee_service_remove,
},
};
static int scmi_optee_init(void)
{
return driver_register(&scmi_optee_driver.driver);
}
static void scmi_optee_exit(void)
{
if (scmi_optee_private)
driver_unregister(&scmi_optee_driver.driver);
}
const struct scmi_desc scmi_optee_desc = {
.transport_exit = scmi_optee_exit,
.ops = &scmi_optee_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
.sync_cmds_completed_on_ret = true,
};
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
*/ */
#include <linux/arm-smccc.h> #include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -14,6 +15,7 @@ ...@@ -14,6 +15,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/processor.h>
#include <linux/slab.h> #include <linux/slab.h>
#include "common.h" #include "common.h"
...@@ -23,26 +25,29 @@ ...@@ -23,26 +25,29 @@
* *
* @cinfo: SCMI channel info * @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area * @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
* Used when operating in atomic mode.
* @func_id: smc/hvc call function id * @func_id: smc/hvc call function id
* @irq: Optional; employed when platforms indicates msg completion by intr.
* @tx_complete: Optional, employed only when irq is valid.
*/ */
struct scmi_smc { struct scmi_smc {
struct scmi_chan_info *cinfo; struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem; struct scmi_shared_mem __iomem *shmem;
/* Protect access to shmem area */
struct mutex shmem_lock; struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
atomic_t inflight;
u32 func_id; u32 func_id;
int irq;
struct completion tx_complete;
}; };
static irqreturn_t smc_msg_done_isr(int irq, void *data) static irqreturn_t smc_msg_done_isr(int irq, void *data)
{ {
struct scmi_smc *scmi_info = data; struct scmi_smc *scmi_info = data;
complete(&scmi_info->tx_complete); scmi_rx_callback(scmi_info->cinfo,
shmem_read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx) ...@@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx)
return true; return true;
} }
static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_init(&scmi_info->shmem_lock);
}
static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
int ret;
ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
return ret == INFLIGHT_NONE;
}
static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
struct scmi_xfer *xfer __maybe_unused)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
else
mutex_lock(&scmi_info->shmem_lock);
}
static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_unlock(&scmi_info->shmem_lock);
}
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx) bool tx)
{ {
...@@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, ...@@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
dev_err(dev, "failed to setup SCMI smc irq\n"); dev_err(dev, "failed to setup SCMI smc irq\n");
return ret; return ret;
} }
init_completion(&scmi_info->tx_complete); } else {
scmi_info->irq = irq; cinfo->no_completion_irq = true;
} }
scmi_info->func_id = func_id; scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo; scmi_info->cinfo = cinfo;
mutex_init(&scmi_info->shmem_lock); smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info; cinfo->transport_info = scmi_info;
return 0; return 0;
...@@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo, ...@@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
struct scmi_smc *scmi_info = cinfo->transport_info; struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res; struct arm_smccc_res res;
mutex_lock(&scmi_info->shmem_lock); /*
* Channel will be released only once response has been
* surely fully retrieved, so after .mark_txdone()
*/
smc_channel_lock_acquire(scmi_info, xfer);
shmem_tx_prepare(scmi_info->shmem, xfer); shmem_tx_prepare(scmi_info->shmem, xfer);
if (scmi_info->irq)
reinit_completion(&scmi_info->tx_complete);
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
if (scmi_info->irq)
wait_for_completion(&scmi_info->tx_complete);
scmi_rx_callback(scmi_info->cinfo,
shmem_read_header(scmi_info->shmem), NULL);
mutex_unlock(&scmi_info->shmem_lock);
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
if (res.a0) if (res.a0) {
smc_channel_lock_release(scmi_info);
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
return 0; return 0;
} }
...@@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, ...@@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(scmi_info->shmem, xfer); shmem_fetch_response(scmi_info->shmem, xfer);
} }
static bool static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) struct scmi_xfer *__unused)
{ {
struct scmi_smc *scmi_info = cinfo->transport_info; struct scmi_smc *scmi_info = cinfo->transport_info;
return shmem_poll_done(scmi_info->shmem, xfer); smc_channel_lock_release(scmi_info);
} }
static const struct scmi_transport_ops scmi_smc_ops = { static const struct scmi_transport_ops scmi_smc_ops = {
...@@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = { ...@@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = {
.chan_setup = smc_chan_setup, .chan_setup = smc_chan_setup,
.chan_free = smc_chan_free, .chan_free = smc_chan_free,
.send_message = smc_send_message, .send_message = smc_send_message,
.mark_txdone = smc_mark_txdone,
.fetch_response = smc_fetch_response, .fetch_response = smc_fetch_response,
.poll_done = smc_poll_done,
}; };
const struct scmi_desc scmi_smc_desc = { const struct scmi_desc scmi_smc_desc = {
...@@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = { ...@@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = {
.max_rx_timeout_ms = 30, .max_rx_timeout_ms = 30,
.max_msg = 20, .max_msg = 20,
.max_msg_size = 128, .max_msg_size = 128,
/*
* Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
* once the SMC instruction has completed successfully, the issued
* SCMI command would have been already fully processed by the SCMI
* platform firmware and so any possible response value expected
* for the issued command will be immediately ready to be fetched
* from the shared memory area.
*/
.sync_cmds_completed_on_ret = true,
.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
}; };
...@@ -95,7 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, ...@@ -95,7 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc) if (rc)
dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
else else
virtqueue_kick(vioch->vqueue); virtqueue_kick(vioch->vqueue);
...@@ -193,7 +193,7 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) ...@@ -193,7 +193,7 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
static int virtio_link_supplier(struct device *dev) static int virtio_link_supplier(struct device *dev)
{ {
if (!scmi_vdev) { if (!scmi_vdev) {
dev_notice_once(dev, dev_notice(dev,
"Deferring probe after not finding a bound scmi-virtio device\n"); "Deferring probe after not finding a bound scmi-virtio device\n");
return -EPROBE_DEFER; return -EPROBE_DEFER;
} }
...@@ -334,9 +334,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, ...@@ -334,9 +334,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
if (rc) { if (rc) {
list_add(&msg->list, &vioch->free_list); list_add(&msg->list, &vioch->free_list);
dev_err_once(vioch->cinfo->dev, dev_err(vioch->cinfo->dev,
"%s() failed to add to virtqueue (%d)\n", __func__, "failed to add to TX virtqueue (%d)\n", rc);
rc);
} else { } else {
virtqueue_kick(vioch->vqueue); virtqueue_kick(vioch->vqueue);
} }
...@@ -427,7 +426,7 @@ static int scmi_vio_probe(struct virtio_device *vdev) ...@@ -427,7 +426,7 @@ static int scmi_vio_probe(struct virtio_device *vdev)
sz /= DESCRIPTORS_PER_TX_MSG; sz /= DESCRIPTORS_PER_TX_MSG;
if (sz > MSG_TOKEN_MAX) { if (sz > MSG_TOKEN_MAX) {
dev_info_once(dev, dev_info(dev,
"%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
channels[i].is_rx ? "rx" : "tx", channels[i].is_rx ? "rx" : "tx",
sz, MSG_TOKEN_MAX); sz, MSG_TOKEN_MAX);
...@@ -460,12 +459,13 @@ static void scmi_vio_remove(struct virtio_device *vdev) ...@@ -460,12 +459,13 @@ static void scmi_vio_remove(struct virtio_device *vdev)
static int scmi_vio_validate(struct virtio_device *vdev) static int scmi_vio_validate(struct virtio_device *vdev)
{ {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev, dev_err(&vdev->dev,
"device does not comply with spec version 1.x\n"); "device does not comply with spec version 1.x\n");
return -EINVAL; return -EINVAL;
} }
#endif
return 0; return 0;
} }
......
...@@ -612,6 +612,13 @@ struct scmi_notify_ops { ...@@ -612,6 +612,13 @@ struct scmi_notify_ops {
* @devm_protocol_get: devres managed method to acquire a protocol and get specific * @devm_protocol_get: devres managed method to acquire a protocol and get specific
* operations and a dedicated protocol handler * operations and a dedicated protocol handler
* @devm_protocol_put: devres managed method to release a protocol * @devm_protocol_put: devres managed method to release a protocol
* @is_transport_atomic: method to check if the underlying transport for this
* instance handle is configured to support atomic
* transactions for commands.
* Some users of the SCMI stack in the upper layers could
* be interested to know if they can assume SCMI
* command transactions associated to this handle will
* never sleep and act accordingly.
* @notify_ops: pointer to set of notifications related operations * @notify_ops: pointer to set of notifications related operations
*/ */
struct scmi_handle { struct scmi_handle {
...@@ -622,6 +629,7 @@ struct scmi_handle { ...@@ -622,6 +629,7 @@ struct scmi_handle {
(*devm_protocol_get)(struct scmi_device *sdev, u8 proto, (*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
struct scmi_protocol_handle **ph); struct scmi_protocol_handle **ph);
void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
bool (*is_transport_atomic)(const struct scmi_handle *handle);
const struct scmi_notify_ops *notify_ops; const struct scmi_notify_ops *notify_ops;
}; };
......
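A hedged sketch of how an SCMI client driver might consult the new is_transport_atomic handle operation documented above. The probe function and its name below are hypothetical and not part of this series; the scmi_device/scmi_handle fields and the operation itself are the ones added in this diff.

#include <linux/scmi_protocol.h>

static int example_scmi_client_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;

	if (!handle || !handle->is_transport_atomic)
		return -ENODEV;

	/*
	 * True only when the underlying transport is both atomic_enabled
	 * and polling capable, i.e. SCMI command transactions issued
	 * through this handle are expected never to sleep.
	 */
	if (handle->is_transport_atomic(handle))
		dev_info(&sdev->dev,
			 "SCMI transport supports atomic transactions\n");

	return 0;
}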
...@@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin, ...@@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->seq, __entry->poll) __entry->seq, __entry->poll)
); );
TRACE_EVENT(scmi_xfer_response_wait,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
u32 timeout, bool poll),
TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll),
TP_STRUCT__entry(
__field(int, transfer_id)
__field(u8, msg_id)
__field(u8, protocol_id)
__field(u16, seq)
__field(u32, timeout)
__field(bool, poll)
),
TP_fast_assign(
__entry->transfer_id = transfer_id;
__entry->msg_id = msg_id;
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->timeout = timeout;
__entry->poll = poll;
),
TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
__entry->seq, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end, TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
int status), int status),
......