Commit 21ddff5c authored by David S. Miller

Merge branch 'net-ipa-small-improvements'

Alex Elder says:

====================
net: ipa: small improvements

This series contains two patches that improve the error output
that's reported when an error occurs while changing the state of a
GSI channel or event ring.  The first ensures all such error
conditions report an error, and the second simplifies the messages a
little and ensures they are all consistent.

A third (independent) patch gets rid of an unused symbol in the
microcontroller code.

Version 2 fixes two alignment problems pointed out by checkpatch.pl,
as requested by Jakub Kicinski.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6f6746d7 722208ea
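
For illustration only, here is a minimal sketch of the error-reporting pattern the first two patches converge on in the GSI code: cache the struct device pointer once per function, log every bad-state bail-out, and keep one consistent message form ("bad channel state %u before/after <operation>"). The demo_* names and simplified types below are invented for this sketch and are not part of the driver.

#include <linux/device.h>
#include <linux/errno.h>

enum demo_channel_state {
	DEMO_CHANNEL_STATE_ALLOCATED,
	DEMO_CHANNEL_STATE_STARTED,
	DEMO_CHANNEL_STATE_STOPPED,
};

struct demo_channel {
	struct device *dev;
	enum demo_channel_state state;
};

/* Hypothetical stand-in for issuing the hardware command and reading back
 * the resulting channel state; real hardware could leave another state.
 */
static enum demo_channel_state demo_issue_start_command(struct demo_channel *channel)
{
	channel->state = DEMO_CHANNEL_STATE_STARTED;
	return channel->state;
}

static int demo_channel_start(struct demo_channel *channel)
{
	struct device *dev = channel->dev;	/* cached once, used below */
	enum demo_channel_state state = channel->state;

	if (state != DEMO_CHANNEL_STATE_ALLOCATED &&
	    state != DEMO_CHANNEL_STATE_STOPPED) {
		/* every early bail-out now reports, with consistent wording */
		dev_err(dev, "bad channel state %u before start\n", state);
		return -EINVAL;
	}

	state = demo_issue_start_command(channel);

	if (state != DEMO_CHANNEL_STATE_STARTED) {
		dev_err(dev, "bad channel state %u after start\n", state);
		return -EIO;
	}

	return 0;
}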
@@ -336,6 +336,7 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 {
 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
 	struct completion *completion = &evt_ring->completion;
+	struct device *dev = gsi->dev;
 	u32 val;
 
 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
@@ -344,8 +345,8 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
 		return 0;	/* Success! */
 
-	dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
-		"(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
+	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
+		opcode, evt_ring_id, evt_ring->state);
 
 	return -ETIMEDOUT;
 }
@@ -358,13 +359,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 	/* Get initial event ring state */
 	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
+			evt_ring->state);
 		return -EINVAL;
+	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-		dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
 			evt_ring->state);
 		ret = -EIO;
 	}
@@ -381,14 +384,14 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
 	    state != GSI_EVT_RING_STATE_ERROR) {
-		dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+		dev_err(gsi->dev, "bad event ring state %u before reset\n",
 			evt_ring->state);
 		return;
 	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
-		dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+		dev_err(gsi->dev, "bad event ring state %u after reset\n",
 			evt_ring->state);
 }
@@ -399,14 +402,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 	int ret;
 
 	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-		dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
 			evt_ring->state);
 		return;
 	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
-		dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
 			evt_ring->state);
 }
@@ -429,6 +432,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 	struct completion *completion = &channel->completion;
 	u32 channel_id = gsi_channel_id(channel);
 	struct gsi *gsi = channel->gsi;
+	struct device *dev = gsi->dev;
 	u32 val;
 
 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
@@ -437,8 +441,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
 		return 0;	/* Success! */
 
-	dev_err(gsi->dev,
-		"GSI command %u to channel %u timed out (state is %u)\n",
+	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
 		opcode, channel_id, gsi_channel_state(channel));
 
 	return -ETIMEDOUT;
@@ -448,21 +451,23 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	struct device *dev = gsi->dev;
 	enum gsi_channel_state state;
 	int ret;
 
 	/* Get initial channel state */
 	state = gsi_channel_state(channel);
-	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
+		dev_err(dev, "bad channel state %u before alloc\n", state);
 		return -EINVAL;
+	}
 
 	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
 
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
-		dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
-			state);
+		dev_err(dev, "bad channel state %u after alloc\n", state);
 		ret = -EIO;
 	}
@@ -472,21 +477,23 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 /* Start an ALLOCATED channel */
 static int gsi_channel_start_command(struct gsi_channel *channel)
 {
+	struct device *dev = channel->gsi->dev;
 	enum gsi_channel_state state;
 	int ret;
 
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
-	    state != GSI_CHANNEL_STATE_STOPPED)
+	    state != GSI_CHANNEL_STATE_STOPPED) {
+		dev_err(dev, "bad channel state %u before start\n", state);
 		return -EINVAL;
+	}
 
 	ret = gsi_channel_command(channel, GSI_CH_START);
 
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
-		dev_err(channel->gsi->dev,
-			"bad channel state (%u) after start\n", state);
+		dev_err(dev, "bad channel state %u after start\n", state);
 		ret = -EIO;
 	}
@@ -496,13 +503,16 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
 /* Stop a GSI channel in STARTED state */
 static int gsi_channel_stop_command(struct gsi_channel *channel)
 {
+	struct device *dev = channel->gsi->dev;
 	enum gsi_channel_state state;
 	int ret;
 
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_STARTED &&
-	    state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
+		dev_err(dev, "bad channel state %u before stop\n", state);
 		return -EINVAL;
+	}
 
 	ret = gsi_channel_command(channel, GSI_CH_STOP);
@@ -515,8 +525,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
 	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
 		return -EAGAIN;
 
-	dev_err(channel->gsi->dev,
-		"bad channel state (%u) after stop\n", state);
+	dev_err(dev, "bad channel state %u after stop\n", state);
 
 	return -EIO;
 }
@@ -524,6 +533,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
 /* Reset a GSI channel in ALLOCATED or ERROR state. */
 static void gsi_channel_reset_command(struct gsi_channel *channel)
 {
+	struct device *dev = channel->gsi->dev;
 	enum gsi_channel_state state;
 	int ret;
@@ -532,8 +542,7 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_STOPPED &&
 	    state != GSI_CHANNEL_STATE_ERROR) {
-		dev_err(channel->gsi->dev,
-			"bad channel state (%u) before reset\n", state);
+		dev_err(dev, "bad channel state %u before reset\n", state);
 		return;
 	}
@@ -542,21 +551,20 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
-		dev_err(channel->gsi->dev,
-			"bad channel state (%u) after reset\n", state);
+		dev_err(dev, "bad channel state %u after reset\n", state);
 }
 
 /* Deallocate an ALLOCATED GSI channel */
 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	struct device *dev = gsi->dev;
 	enum gsi_channel_state state;
 	int ret;
 
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
-		dev_err(gsi->dev,
-			"bad channel state (%u) before dealloc\n", state);
+		dev_err(dev, "bad channel state %u before dealloc\n", state);
 		return;
 	}
@@ -565,8 +573,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
-		dev_err(gsi->dev,
-			"bad channel state (%u) after dealloc\n", state);
+		dev_err(dev, "bad channel state %u after dealloc\n", state);
 }
 
 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -1148,8 +1155,8 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
 			break;
 		default:
 			dev_err(gsi->dev,
-				"%s: unrecognized type 0x%08x\n",
-				__func__, gsi_intr);
+				"unrecognized interrupt type 0x%08x\n",
+				gsi_intr);
 			break;
 		}
 	} while (intr_mask);
@@ -1253,7 +1260,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
 	if (ring->virt && addr % size) {
 		dma_free_coherent(dev, size, ring->virt, ring->addr);
 		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
-				size);
+			size);
 		return -EINVAL;	/* Not a good error value, but distinct */
 	} else if (!ring->virt) {
 		return -ENOMEM;
@@ -1644,12 +1651,13 @@ static void gsi_channel_teardown(struct gsi *gsi)
 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
 int gsi_setup(struct gsi *gsi, bool legacy)
 {
+	struct device *dev = gsi->dev;
 	u32 val;
 
 	/* Here is where we first touch the GSI hardware */
 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
 	if (!(val & ENABLED_FMASK)) {
-		dev_err(gsi->dev, "GSI has not been enabled\n");
+		dev_err(dev, "GSI has not been enabled\n");
 		return -EIO;
 	}
@@ -1657,24 +1665,24 @@ int gsi_setup(struct gsi *gsi, bool legacy)
 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
 	if (!gsi->channel_count) {
-		dev_err(gsi->dev, "GSI reports zero channels supported\n");
+		dev_err(dev, "GSI reports zero channels supported\n");
 		return -EINVAL;
 	}
 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
-		dev_warn(gsi->dev,
-			 "limiting to %u channels (hardware supports %u)\n",
+		dev_warn(dev,
+			 "limiting to %u channels; hardware supports %u\n",
 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
 	}
 
 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
 	if (!gsi->evt_ring_count) {
-		dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+		dev_err(dev, "GSI reports zero event rings supported\n");
 		return -EINVAL;
 	}
 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
-		dev_warn(gsi->dev,
-			 "limiting to %u event rings (hardware supports %u)\n",
+		dev_warn(dev,
+			 "limiting to %u event rings; hardware supports %u\n",
 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
 	}
@@ -1760,19 +1768,19 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
 	/* Make sure channel ids are in the range driver supports */
 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
-		dev_err(dev, "bad channel id %u (must be less than %u)\n",
+		dev_err(dev, "bad channel id %u; must be less than %u\n",
 			channel_id, GSI_CHANNEL_COUNT_MAX);
 		return false;
 	}
 
 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
-		dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
 		return false;
 	}
 
 	if (!data->channel.tlv_count ||
 	    data->channel.tlv_count > GSI_TLV_MAX) {
-		dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
 		return false;
 	}
@@ -1790,13 +1798,13 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
 	}
 
 	if (!is_power_of_2(data->channel.tre_count)) {
-		dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
 			channel_id, data->channel.tre_count);
 		return false;
 	}
 
 	if (!is_power_of_2(data->channel.event_count)) {
-		dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
 			channel_id, data->channel.event_count);
 		return false;
 	}
@@ -1950,6 +1958,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
 	     u32 count, const struct ipa_gsi_endpoint_data *data,
 	     bool modem_alloc)
 {
+	struct device *dev = &pdev->dev;
 	struct resource *res;
 	resource_size_t size;
 	unsigned int irq;
@@ -1957,7 +1966,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
 	gsi_validate_build();
 
-	gsi->dev = &pdev->dev;
+	gsi->dev = dev;
 
 	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
 	 * network device structure, but the GSI layer does not have one,
/* Get the GSI IRQ and request for it to wake the system */
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0) {
dev_err(gsi->dev,
"DT error %d getting \"gsi\" IRQ property\n", ret);
dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
return ret ? : -EINVAL;
}
irq = ret;
ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
if (ret) {
dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
return ret;
}
gsi->irq = irq;
ret = enable_irq_wake(gsi->irq);
if (ret)
dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
gsi->irq_wake_enabled = !ret;
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(gsi->dev,
"DT error getting \"gsi\" memory property\n");
dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
goto err_disable_irq_wake;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
goto err_disable_irq_wake;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
goto err_disable_irq_wake;
}
@@ -35,12 +35,6 @@
  */
 
 /* Supports hardware interface version 0x2000 */
 
-/* Offset relative to the base of the IPA shared address space of the
- * shared region used for communication with the microcontroller. The
- * region is 128 bytes in size, but only the first 40 bytes are used.
- */
-#define IPA_MEM_UC_OFFSET	0x0000
-
 /* Delay to allow a the microcontroller to save state when crashing */
 #define IPA_SEND_DELAY	100	/* microseconds */
@@ -60,6 +54,10 @@
  * @hw_state:		state of hardware (including error type information)
  * @warning_counter:	counter of non-fatal hardware errors
  * @interface_version:	hardware-reported interface version
+ *
+ * A shared memory area at the base of IPA resident memory is used for
+ * communication with the microcontroller. The region is 128 bytes in
+ * size, but only the first 40 bytes (structured this way) are used.
  */
 struct ipa_uc_mem_area {
 	u8 command;		/* enum ipa_uc_command */
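
As an aside, here is a minimal compile-time sketch (not from this series; demo_uc_area is a made-up stand-in for the real shared structure) of how the documented 40-byte bound on the used portion of the shared area could be asserted with the kernel's static_assert():

#include <linux/build_bug.h>
#include <linux/types.h>

/* Made-up layout standing in for the real microcontroller shared area */
struct demo_uc_area {
	u8 command;		/* first byte, as in the real structure */
	u8 reserved[39];	/* placeholder for the remaining fields */
};

/* The relocated comment documents that only the first 40 bytes are used */
static_assert(sizeof(struct demo_uc_area) <= 40,
	      "microcontroller shared area must fit in 40 bytes");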