Commit 55742d2a authored by Johan Hovold, committed by Greg Kroah-Hartman

greybus: interface: implement generic mode-switch functionality

Add a generic interface that bundle drivers can use to request that a
mode switch be carried out on their behalf.

Mode switching involves tearing down all connections to an interface,
sending a unidirectional mode-switch request, and waiting for a mailbox
event that triggers deferred control connection reset and re-enumeration
of the interface. In case of a timeout waiting for the interface mailbox
event, or on other errors, the interface is powered off.

All of this needs to be done by core from work-queue context in order
not to block incoming SVC requests and bundle-device tear down. Care
must also be taken to serialise against concurrent module removal events
and eject requests.

Special handling of legacy mode-switching is also added in order to
continue to support the ES3 bootrom.
Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent 08f94352
...@@ -487,10 +487,23 @@ gb_connection_control_disconnected(struct gb_connection *connection) ...@@ -487,10 +487,23 @@ gb_connection_control_disconnected(struct gb_connection *connection)
if (gb_connection_is_static(connection)) if (gb_connection_is_static(connection))
return; return;
if (gb_connection_is_control(connection)) control = connection->intf->control;
if (gb_connection_is_control(connection)) {
if (connection->mode_switch) {
ret = gb_control_mode_switch_operation(control);
if (ret) {
/*
* Allow mode switch to time out waiting for
* mailbox event.
*/
return;
}
}
return; return;
}
control = connection->intf->control;
ret = gb_control_disconnected_operation(control, cport_id); ret = gb_control_disconnected_operation(control, cport_id);
if (ret) { if (ret) {
...@@ -743,6 +756,18 @@ void gb_connection_disable_rx(struct gb_connection *connection) ...@@ -743,6 +756,18 @@ void gb_connection_disable_rx(struct gb_connection *connection)
} }
EXPORT_SYMBOL_GPL(gb_connection_disable_rx); EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
/*
 * Flag the connection as mode switching so that a subsequent
 * gb_connection_disable() defers tearing down the SVC connection and
 * host-device cport; tear down is finalised later by
 * gb_connection_mode_switch_complete().
 */
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}
/*
 * Finalise a deferred connection tear down: destroy the SVC connection
 * and disable the host-device cport that gb_connection_disable() skipped
 * while mode_switch was set, then clear the flag.
 */
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);
	connection->mode_switch = false;
}
void gb_connection_disable(struct gb_connection *connection) void gb_connection_disable(struct gb_connection *connection)
{ {
mutex_lock(&connection->mutex); mutex_lock(&connection->mutex);
...@@ -768,8 +793,11 @@ void gb_connection_disable(struct gb_connection *connection) ...@@ -768,8 +793,11 @@ void gb_connection_disable(struct gb_connection *connection)
connection->state = GB_CONNECTION_STATE_DISABLED; connection->state = GB_CONNECTION_STATE_DISABLED;
gb_connection_svc_connection_destroy(connection); /* control-connection tear down is deferred when mode switching */
gb_connection_hd_cport_disable(connection); if (!connection->mode_switch) {
gb_connection_svc_connection_destroy(connection);
gb_connection_hd_cport_disable(connection);
}
out_unlock: out_unlock:
mutex_unlock(&connection->mutex); mutex_unlock(&connection->mutex);
......
...@@ -58,6 +58,8 @@ struct gb_connection { ...@@ -58,6 +58,8 @@ struct gb_connection {
atomic_t op_cycle; atomic_t op_cycle;
void *private; void *private;
bool mode_switch;
}; };
struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
...@@ -83,6 +85,9 @@ void gb_connection_disable_rx(struct gb_connection *connection); ...@@ -83,6 +85,9 @@ void gb_connection_disable_rx(struct gb_connection *connection);
void gb_connection_disable(struct gb_connection *connection); void gb_connection_disable(struct gb_connection *connection);
void gb_connection_disable_forced(struct gb_connection *connection); void gb_connection_disable_forced(struct gb_connection *connection);
void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
u8 *data, size_t length); u8 *data, size_t length);
......
...@@ -364,7 +364,24 @@ void gb_control_del(struct gb_control *control) ...@@ -364,7 +364,24 @@ void gb_control_del(struct gb_control *control)
device_del(&control->dev); device_del(&control->dev);
} }
/*
 * Take an extra reference to the control device and return the control.
 * Drop the reference with gb_control_put().
 */
struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}
void gb_control_put(struct gb_control *control) void gb_control_put(struct gb_control *control)
{ {
put_device(&control->dev); put_device(&control->dev);
} }
/* Prepare the underlying control connection for a mode switch. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}
/* Finalise the deferred tear down of the underlying control connection. */
void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}
...@@ -31,6 +31,7 @@ int gb_control_enable(struct gb_control *control); ...@@ -31,6 +31,7 @@ int gb_control_enable(struct gb_control *control);
void gb_control_disable(struct gb_control *control); void gb_control_disable(struct gb_control *control);
int gb_control_add(struct gb_control *control); int gb_control_add(struct gb_control *control);
void gb_control_del(struct gb_control *control); void gb_control_del(struct gb_control *control);
struct gb_control *gb_control_get(struct gb_control *control);
void gb_control_put(struct gb_control *control); void gb_control_put(struct gb_control *control);
int gb_control_get_bundle_versions(struct gb_control *control); int gb_control_get_bundle_versions(struct gb_control *control);
...@@ -39,6 +40,8 @@ int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); ...@@ -39,6 +40,8 @@ int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnecting_operation(struct gb_control *control, int gb_control_disconnecting_operation(struct gb_control *control,
u16 cport_id); u16 cport_id);
int gb_control_mode_switch_operation(struct gb_control *control); int gb_control_mode_switch_operation(struct gb_control *control);
void gb_control_mode_switch_prepare(struct gb_control *control);
void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf); int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
size_t size); size_t size);
......
...@@ -8,9 +8,10 @@ ...@@ -8,9 +8,10 @@
*/ */
#include "greybus.h" #include "greybus.h"
#include "greybus_trace.h" #include "greybus_trace.h"
#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 1000
#define GB_INTERFACE_DEVICE_ID_BAD 0xff #define GB_INTERFACE_DEVICE_ID_BAD 0xff
/* Don't-care selector index */ /* Don't-care selector index */
...@@ -163,6 +164,174 @@ static void gb_interface_route_destroy(struct gb_interface *intf) ...@@ -163,6 +164,174 @@ static void gb_interface_route_destroy(struct gb_interface *intf)
intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
} }
/*
 * Handle a mode switch for an interface with the legacy (ES3-bootrom)
 * quirk by disabling and immediately re-enabling (re-enumerating) the
 * interface, with I/O suppressed while it is being disabled. On
 * re-enable failure the interface is powered off.
 *
 * Locking: Caller holds the interface mutex.
 */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}
/*
 * Handle an interface mailbox event.
 *
 * A UniPro error or an unexpected mailbox value causes the interface to
 * be disabled and deactivated (powered off). Interfaces with the
 * legacy-mode-switch quirk are handled synchronously; otherwise the
 * event completes a pending mode switch started by
 * gb_interface_request_mode_switch().
 */
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	/* ES3 bootrom: carry out the mode switch here, under the mutex. */
	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	/* Wake the mode-switch worker waiting for this event. */
	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
/*
 * Work-queue handler carrying out a requested mode switch.
 *
 * Disables the interface (deferring control-connection tear down), waits
 * for the mailbox event signalling that the switch completed, and then
 * re-enables (re-enumerates) the interface. If the wait is interrupted
 * or times out, or if re-enable fails, the interface is deactivated
 * (powered off).
 */
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	/* Drop the reference taken by gb_interface_request_mode_switch(). */
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}
/*
 * Request that a mode switch be carried out for an interface on behalf
 * of a bundle driver.
 *
 * Takes a reference to the interface device and queues the mode-switch
 * work on system_long_wq so that the actual switch runs from work-queue
 * context; the reference is dropped by the work handler.
 *
 * Returns -EBUSY if a mode switch is already in progress or the work
 * could not be queued, and 0 otherwise.
 */
int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
/* /*
* T_TstSrcIncrement is written by the module on ES2 as a stand-in for the * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
* init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
...@@ -222,7 +391,8 @@ static int gb_interface_read_and_clear_init_status(struct gb_interface *intf) ...@@ -222,7 +391,8 @@ static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
* for example, requires E2EFC, CSD and CSV to be disabled. * for example, requires E2EFC, CSD and CSV to be disabled.
*/ */
bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES | bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
GB_INTERFACE_QUIRK_FORCED_DISABLE; GB_INTERFACE_QUIRK_FORCED_DISABLE |
GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH;
switch (init_status) { switch (init_status) {
case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED: case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED: case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
...@@ -368,6 +538,8 @@ struct gb_interface *gb_interface_create(struct gb_module *module, ...@@ -368,6 +538,8 @@ struct gb_interface *gb_interface_create(struct gb_module *module,
INIT_LIST_HEAD(&intf->bundles); INIT_LIST_HEAD(&intf->bundles);
INIT_LIST_HEAD(&intf->manifest_descs); INIT_LIST_HEAD(&intf->manifest_descs);
mutex_init(&intf->mutex); mutex_init(&intf->mutex);
INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
init_completion(&intf->mode_switch_completion);
/* Invalid device id to start with */ /* Invalid device id to start with */
intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
...@@ -542,6 +714,10 @@ void gb_interface_deactivate(struct gb_interface *intf) ...@@ -542,6 +714,10 @@ void gb_interface_deactivate(struct gb_interface *intf)
trace_gb_interface_deactivate(intf); trace_gb_interface_deactivate(intf);
/* Abort any ongoing mode switch. */
if (intf->mode_switch)
complete(&intf->mode_switch_completion);
gb_interface_route_destroy(intf); gb_interface_route_destroy(intf);
gb_interface_hibernate_link(intf); gb_interface_hibernate_link(intf);
gb_interface_unipro_set(intf, false); gb_interface_unipro_set(intf, false);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) #define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
#define GB_INTERFACE_QUIRK_NO_ARA_IDS BIT(2) #define GB_INTERFACE_QUIRK_NO_ARA_IDS BIT(2)
#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) #define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
struct gb_interface { struct gb_interface {
struct device dev; struct device dev;
...@@ -40,9 +41,14 @@ struct gb_interface { ...@@ -40,9 +41,14 @@ struct gb_interface {
struct mutex mutex; struct mutex mutex;
bool disconnected; bool disconnected;
bool ejected; bool ejected;
bool active; bool active;
bool enabled; bool enabled;
bool mode_switch;
struct work_struct mode_switch_work;
struct completion mode_switch_completion;
}; };
#define to_gb_interface(d) container_of(d, struct gb_interface, dev) #define to_gb_interface(d) container_of(d, struct gb_interface, dev)
...@@ -55,5 +61,9 @@ void gb_interface_disable(struct gb_interface *intf); ...@@ -55,5 +61,9 @@ void gb_interface_disable(struct gb_interface *intf);
int gb_interface_add(struct gb_interface *intf); int gb_interface_add(struct gb_interface *intf);
void gb_interface_del(struct gb_interface *intf); void gb_interface_del(struct gb_interface *intf);
void gb_interface_put(struct gb_interface *intf); void gb_interface_put(struct gb_interface *intf);
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
u32 mailbox);
int gb_interface_request_mode_switch(struct gb_interface *intf);
#endif /* __INTERFACE_H */ #endif /* __INTERFACE_H */
...@@ -895,28 +895,6 @@ static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) ...@@ -895,28 +895,6 @@ static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
return NULL; return NULL;
} }
/*
 * Disable and re-enable (re-enumerate) an interface after a mode switch,
 * with I/O suppressed while the interface is being disabled. Deactivates
 * the interface if re-enable fails.
 */
static void gb_svc_intf_reenable(struct gb_svc *svc, struct gb_interface *intf)
{
	int ret;

	mutex_lock(&intf->mutex);

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to enable interface %u: %d\n",
			intf->interface_id, ret);

		gb_interface_deactivate(intf);
	}

	mutex_unlock(&intf->mutex);
}
static void gb_svc_process_hello_deferred(struct gb_operation *operation) static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{ {
struct gb_connection *connection = operation->connection; struct gb_connection *connection = operation->connection;
...@@ -965,10 +943,9 @@ static void gb_svc_process_intf_hotplug(struct gb_operation *operation) ...@@ -965,10 +943,9 @@ static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
/* All modules are considered 1x2 for now */ /* All modules are considered 1x2 for now */
module = gb_svc_module_lookup(svc, intf_id); module = gb_svc_module_lookup(svc, intf_id);
if (module) { if (module) {
dev_info(&svc->dev, "mode switch detected on interface %u\n", /* legacy mode switch */
intf_id); return gb_interface_mailbox_event(module->interfaces[0], 0,
GB_SVC_INTF_MAILBOX_GREYBUS);
return gb_svc_intf_reenable(svc, module->interfaces[0]);
} }
module = gb_module_create(hd, intf_id, 1); module = gb_module_create(hd, intf_id, 1);
...@@ -1115,31 +1092,7 @@ static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) ...@@ -1115,31 +1092,7 @@ static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
return; return;
} }
if (result_code) { gb_interface_mailbox_event(intf, result_code, mailbox);
dev_warn(&svc->dev,
"mailbox event %u with UniPro error: 0x%04x\n",
intf_id, result_code);
goto err_disable_interface;
}
if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
dev_warn(&svc->dev,
"mailbox event %u with unexected value: 0x%08x\n",
intf_id, mailbox);
goto err_disable_interface;
}
dev_info(&svc->dev, "mode switch detected on interface %u\n", intf_id);
gb_svc_intf_reenable(svc, intf);
return;
err_disable_interface:
mutex_lock(&intf->mutex);
gb_interface_disable(intf);
gb_interface_deactivate(intf);
mutex_unlock(&intf->mutex);
} }
static void gb_svc_process_deferred_request(struct work_struct *work) static void gb_svc_process_deferred_request(struct work_struct *work)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment