Commit 95ec6bce authored by David S. Miller's avatar David S. Miller

Merge branch 'net-ipa-more-endpoints'

Alex Elder says:

====================
net: ipa: support more endpoints

This series adds support for more than 32 IPA endpoints.  To do
this, five registers whose bits represent endpoint state are
replicated as needed to represent endpoints beyond 32.  For existing
platforms, the number of endpoints is never greater than 32, so
there is just one of each register.  IPA v5.0+ supports more than
that though; these changes prepare the code for that.

Beyond that, the IPA fields that represent endpoints in a 32-bit
bitmask are updated to support an arbitrary number of these endpoint
registers.  (There is one exception, explained in patch 7.)

The first two patches are somewhat unrelated cleanups, making
use of a helper function introduced recently.

The third and fourth use parameterized functions to determine the
register offset for registers that represent endpoints.

The last five convert fields representing endpoints to allow more
than 32 endpoints to be represented.

Since v1, I have implemented Jakub's suggestions:
  - Don't print a message on (bitmap) memory allocation failure
  - Do not do "mass null checks" when allocating bitmaps
  - Rework some code to ensure error path is sane
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents d28c0e73 9b7a0065
...@@ -61,12 +61,13 @@ struct ipa_interrupt; ...@@ -61,12 +61,13 @@ struct ipa_interrupt;
* @zero_addr: DMA address of preallocated zero-filled memory * @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory * @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory * @zero_size: Size (bytes) of preallocated zero-filled memory
* @endpoint_count: Number of endpoints represented by bit masks below * @endpoint_count: Number of defined bits in most bitmaps below
* @defined: Bit mask indicating endpoints defined in config data * @available_count: Number of defined bits in the available bitmap
* @available: Bit mask indicating endpoints hardware supports * @defined: Bitmap of endpoints defined in config data
* @filter_map: Bit mask indicating endpoints that support filtering * @available: Bitmap of endpoints supported by hardware
* @set_up: Bit mask indicating endpoints set up * @filtered: Bitmap of endpoints that support filtering
* @enabled: Bit mask indicating endpoints enabled * @set_up: Bitmap of endpoints that are set up for use
* @enabled: Bitmap of currently enabled endpoints
* @modem_tx_count: Number of defined modem TX endpoints * @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information * @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint * @channel_map: Mapping of GSI channel to IPA endpoint
...@@ -117,13 +118,14 @@ struct ipa { ...@@ -117,13 +118,14 @@ struct ipa {
void *zero_virt; void *zero_virt;
size_t zero_size; size_t zero_size;
/* Bit masks indicating endpoint state */ /* Bitmaps indicating endpoint state */
u32 endpoint_count; u32 endpoint_count;
u32 defined; /* Defined in configuration data */ u32 available_count;
u32 available; /* Supported by hardware */ unsigned long *defined; /* Defined in configuration data */
u32 filter_map; unsigned long *available; /* Supported by hardware */
u32 set_up; u64 filtered; /* Support filtering (AP and modem) */
u32 enabled; unsigned long *set_up;
unsigned long *enabled;
u32 modem_tx_count; u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX]; struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
......
...@@ -350,29 +350,32 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) ...@@ -350,29 +350,32 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{ {
u32 mask = BIT(endpoint->endpoint_id); u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa; struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
const struct ipa_reg *reg; const struct ipa_reg *reg;
u32 val; u32 val;
WARN_ON(!(mask & ipa->available)); WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
return !!(val & mask); return !!(val & BIT(endpoint_id % 32));
} }
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{ {
u32 mask = BIT(endpoint->endpoint_id); u32 endpoint_id = endpoint->endpoint_id;
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa; struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
const struct ipa_reg *reg; const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available)); WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg)); iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
} }
/** /**
...@@ -453,8 +456,8 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) ...@@ -453,8 +456,8 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
/* Reset all modem endpoints to use the default exception endpoint */ /* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{ {
u32 defined = ipa->defined;
struct gsi_trans *trans; struct gsi_trans *trans;
u32 endpoint_id;
u32 count; u32 count;
/* We need one command per modem TX endpoint, plus the commands /* We need one command per modem TX endpoint, plus the commands
...@@ -468,14 +471,11 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) ...@@ -468,14 +471,11 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
return -EBUSY; return -EBUSY;
} }
while (defined) { for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
u32 endpoint_id = __ffs(defined);
struct ipa_endpoint *endpoint; struct ipa_endpoint *endpoint;
const struct ipa_reg *reg; const struct ipa_reg *reg;
u32 offset; u32 offset;
defined ^= BIT(endpoint_id);
/* We only reset modem TX endpoints */ /* We only reset modem TX endpoints */
endpoint = &ipa->endpoint[endpoint_id]; endpoint = &ipa->endpoint[endpoint_id];
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
...@@ -1666,6 +1666,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ...@@ -1666,6 +1666,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{ {
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa; struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi; struct gsi *gsi = &ipa->gsi;
int ret; int ret;
...@@ -1675,37 +1676,35 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) ...@@ -1675,37 +1676,35 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
dev_err(&ipa->pdev->dev, dev_err(&ipa->pdev->dev,
"error %d starting %cX channel %u for endpoint %u\n", "error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R', ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint->endpoint_id); endpoint->channel_id, endpoint_id);
return ret; return ret;
} }
if (!endpoint->toward_ipa) { if (!endpoint->toward_ipa) {
ipa_interrupt_suspend_enable(ipa->interrupt, ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
endpoint->endpoint_id);
ipa_endpoint_replenish_enable(endpoint); ipa_endpoint_replenish_enable(endpoint);
} }
ipa->enabled |= BIT(endpoint->endpoint_id); __set_bit(endpoint_id, ipa->enabled);
return 0; return 0;
} }
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{ {
u32 mask = BIT(endpoint->endpoint_id); u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa; struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi; struct gsi *gsi = &ipa->gsi;
int ret; int ret;
if (!(ipa->enabled & mask)) if (!test_bit(endpoint_id, ipa->enabled))
return; return;
ipa->enabled ^= mask; __clear_bit(endpoint_id, endpoint->ipa->enabled);
if (!endpoint->toward_ipa) { if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint); ipa_endpoint_replenish_disable(endpoint);
ipa_interrupt_suspend_disable(ipa->interrupt, ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
endpoint->endpoint_id);
} }
/* Note that if stop fails, the channel's state is not well-defined */ /* Note that if stop fails, the channel's state is not well-defined */
...@@ -1713,7 +1712,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) ...@@ -1713,7 +1712,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
if (ret) if (ret)
dev_err(&ipa->pdev->dev, dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret, "error %d attempting to stop endpoint %u\n", ret,
endpoint->endpoint_id); endpoint_id);
} }
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
...@@ -1722,7 +1721,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) ...@@ -1722,7 +1721,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi; struct gsi *gsi = &endpoint->ipa->gsi;
int ret; int ret;
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return; return;
if (!endpoint->toward_ipa) { if (!endpoint->toward_ipa) {
...@@ -1742,7 +1741,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) ...@@ -1742,7 +1741,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi; struct gsi *gsi = &endpoint->ipa->gsi;
int ret; int ret;
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return; return;
if (!endpoint->toward_ipa) if (!endpoint->toward_ipa)
...@@ -1802,12 +1801,12 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) ...@@ -1802,12 +1801,12 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
ipa_endpoint_program(endpoint); ipa_endpoint_program(endpoint);
endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
} }
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{ {
endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
if (!endpoint->toward_ipa) if (!endpoint->toward_ipa)
cancel_delayed_work_sync(&endpoint->replenish_work); cancel_delayed_work_sync(&endpoint->replenish_work);
...@@ -1817,40 +1816,35 @@ static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) ...@@ -1817,40 +1816,35 @@ static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_setup(struct ipa *ipa) void ipa_endpoint_setup(struct ipa *ipa)
{ {
u32 defined = ipa->defined; u32 endpoint_id;
ipa->set_up = 0;
while (defined) {
u32 endpoint_id = __ffs(defined);
defined ^= BIT(endpoint_id);
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}
} }
void ipa_endpoint_teardown(struct ipa *ipa) void ipa_endpoint_teardown(struct ipa *ipa)
{ {
u32 set_up = ipa->set_up; u32 endpoint_id;
while (set_up) {
u32 endpoint_id = __fls(set_up);
set_up ^= BIT(endpoint_id);
for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
} }
ipa->set_up = 0;
void ipa_endpoint_deconfig(struct ipa *ipa)
{
ipa->available_count = 0;
bitmap_free(ipa->available);
ipa->available = NULL;
} }
int ipa_endpoint_config(struct ipa *ipa) int ipa_endpoint_config(struct ipa *ipa)
{ {
struct device *dev = &ipa->pdev->dev; struct device *dev = &ipa->pdev->dev;
const struct ipa_reg *reg; const struct ipa_reg *reg;
u32 endpoint_id;
u32 tx_count; u32 tx_count;
u32 rx_count; u32 rx_count;
u32 rx_base; u32 rx_base;
u32 defined;
u32 limit; u32 limit;
u32 val; u32 val;
...@@ -1865,7 +1859,13 @@ int ipa_endpoint_config(struct ipa *ipa) ...@@ -1865,7 +1859,13 @@ int ipa_endpoint_config(struct ipa *ipa)
* assume the configuration is valid. * assume the configuration is valid.
*/ */
if (ipa->version < IPA_VERSION_3_5) { if (ipa->version < IPA_VERSION_3_5) {
ipa->available = ~0; ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
if (!ipa->available)
return -ENOMEM;
ipa->available_count = IPA_ENDPOINT_MAX;
bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
return 0; return 0;
} }
...@@ -1887,26 +1887,29 @@ int ipa_endpoint_config(struct ipa *ipa) ...@@ -1887,26 +1887,29 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL; return -EINVAL;
} }
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
return -ENOMEM;
ipa->available_count = limit;
/* Mark all supported RX and TX endpoints as available */ /* Mark all supported RX and TX endpoints as available */
ipa->available = GENMASK(limit - 1, rx_base) | GENMASK(tx_count - 1, 0); bitmap_set(ipa->available, 0, tx_count);
bitmap_set(ipa->available, rx_base, rx_count);
defined = ipa->defined; for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
while (defined) {
u32 endpoint_id = __ffs(defined);
struct ipa_endpoint *endpoint; struct ipa_endpoint *endpoint;
defined ^= BIT(endpoint_id);
if (endpoint_id >= limit) { if (endpoint_id >= limit) {
dev_err(dev, "invalid endpoint id, %u > %u\n", dev_err(dev, "invalid endpoint id, %u > %u\n",
endpoint_id, limit - 1); endpoint_id, limit - 1);
return -EINVAL; goto err_free_bitmap;
} }
if (!(BIT(endpoint_id) & ipa->available)) { if (!test_bit(endpoint_id, ipa->available)) {
dev_err(dev, "unavailable endpoint id %u\n", dev_err(dev, "unavailable endpoint id %u\n",
endpoint_id); endpoint_id);
return -EINVAL; goto err_free_bitmap;
} }
/* Make sure it's pointing in the right direction */ /* Make sure it's pointing in the right direction */
...@@ -1919,15 +1922,15 @@ int ipa_endpoint_config(struct ipa *ipa) ...@@ -1919,15 +1922,15 @@ int ipa_endpoint_config(struct ipa *ipa)
} }
dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id); dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
return -EINVAL; goto err_free_bitmap;
} }
return 0; return 0;
}
void ipa_endpoint_deconfig(struct ipa *ipa) err_free_bitmap:
{ ipa_endpoint_deconfig(ipa);
ipa->available = 0; /* Nothing more to do */
return -EINVAL;
} }
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
...@@ -1948,48 +1951,64 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, ...@@ -1948,48 +1951,64 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->toward_ipa = data->toward_ipa; endpoint->toward_ipa = data->toward_ipa;
endpoint->config = data->endpoint.config; endpoint->config = data->endpoint.config;
ipa->defined |= BIT(endpoint->endpoint_id); __set_bit(endpoint->endpoint_id, ipa->defined);
} }
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{ {
endpoint->ipa->defined &= ~BIT(endpoint->endpoint_id); __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
memset(endpoint, 0, sizeof(*endpoint)); memset(endpoint, 0, sizeof(*endpoint));
} }
void ipa_endpoint_exit(struct ipa *ipa) void ipa_endpoint_exit(struct ipa *ipa)
{ {
u32 defined = ipa->defined; u32 endpoint_id;
while (defined) {
u32 endpoint_id = __fls(defined);
defined ^= BIT(endpoint_id); ipa->filtered = 0;
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
}
bitmap_free(ipa->enabled);
ipa->enabled = NULL;
bitmap_free(ipa->set_up);
ipa->set_up = NULL;
bitmap_free(ipa->defined);
ipa->defined = NULL;
memset(ipa->name_map, 0, sizeof(ipa->name_map)); memset(ipa->name_map, 0, sizeof(ipa->name_map));
memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
} }
/* Returns a bitmask of endpoints that support filtering, or 0 on error */ /* Initialize endpoint state; returns 0 on success or a negative error code */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count, int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data) const struct ipa_gsi_endpoint_data *data)
{ {
enum ipa_endpoint_name name; enum ipa_endpoint_name name;
u32 filter_map; u32 filtered;
BUILD_BUG_ON(!IPA_REPLENISH_BATCH); BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
/* Number of endpoints is one more than the maximum ID */ /* Number of endpoints is one more than the maximum ID */
ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
if (!ipa->endpoint_count) if (!ipa->endpoint_count)
return 0; /* Error */ return -EINVAL;
/* Initialize endpoint state bitmaps */
ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->defined)
return -ENOMEM;
ipa->defined = 0; ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->set_up)
goto err_free_defined;
filter_map = 0; ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->enabled)
goto err_free_set_up;
filtered = 0;
for (name = 0; name < count; name++, data++) { for (name = 0; name < count; name++, data++) {
if (ipa_gsi_endpoint_data_empty(data)) if (ipa_gsi_endpoint_data_empty(data))
continue; /* Skip over empty slots */ continue; /* Skip over empty slots */
...@@ -1997,18 +2016,28 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count, ...@@ -1997,18 +2016,28 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
ipa_endpoint_init_one(ipa, name, data); ipa_endpoint_init_one(ipa, name, data);
if (data->endpoint.filter_support) if (data->endpoint.filter_support)
filter_map |= BIT(data->endpoint_id); filtered |= BIT(data->endpoint_id);
if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++; ipa->modem_tx_count++;
} }
if (!ipa_filter_map_valid(ipa, filter_map)) /* Make sure the set of filtered endpoints is valid */
goto err_endpoint_exit; if (!ipa_filtered_valid(ipa, filtered)) {
ipa_endpoint_exit(ipa);
return filter_map; /* Non-zero bitmask */ return -EINVAL;
}
err_endpoint_exit: ipa->filtered = filtered;
ipa_endpoint_exit(ipa);
return 0;
err_free_set_up:
bitmap_free(ipa->set_up);
ipa->set_up = NULL;
err_free_defined:
bitmap_free(ipa->defined);
ipa->defined = NULL;
return 0; /* Error */ return -ENOMEM;
} }
...@@ -195,7 +195,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa); ...@@ -195,7 +195,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa);
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id); void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
void ipa_endpoint_default_route_clear(struct ipa *ipa); void ipa_endpoint_default_route_clear(struct ipa *ipa);
u32 ipa_endpoint_init(struct ipa *ipa, u32 count, int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data); const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa); void ipa_endpoint_exit(struct ipa *ipa);
......
...@@ -132,24 +132,28 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, ...@@ -132,24 +132,28 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 endpoint_id, bool enable) u32 endpoint_id, bool enable)
{ {
struct ipa *ipa = interrupt->ipa; struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id); u32 unit = endpoint_id / 32;
const struct ipa_reg *reg; const struct ipa_reg *reg;
u32 offset; u32 offset;
u32 mask;
u32 val; u32 val;
WARN_ON(!(mask & ipa->available)); WARN_ON(!test_bit(endpoint_id, ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */ /* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0) if (ipa->version == IPA_VERSION_3_0)
return; return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN); reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
offset = ipa_reg_offset(reg); offset = ipa_reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset); val = ioread32(ipa->reg_virt + offset);
mask = BIT(endpoint_id);
if (enable) if (enable)
val |= mask; val |= mask;
else else
val &= ~mask; val &= ~mask;
iowrite32(val, ipa->reg_virt + offset); iowrite32(val, ipa->reg_virt + offset);
} }
...@@ -171,18 +175,24 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id) ...@@ -171,18 +175,24 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt) void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{ {
struct ipa *ipa = interrupt->ipa; struct ipa *ipa = interrupt->ipa;
u32 unit_count;
u32 unit;
unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
const struct ipa_reg *reg; const struct ipa_reg *reg;
u32 val; u32 val;
reg = ipa_reg(ipa, IRQ_SUSPEND_INFO); reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0) if (ipa->version == IPA_VERSION_3_0)
return; continue;
reg = ipa_reg(ipa, IRQ_SUSPEND_CLR); reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
}
} }
/* Simulate arrival of an IPA TX_SUSPEND interrupt */ /* Simulate arrival of an IPA TX_SUSPEND interrupt */
......
...@@ -788,12 +788,9 @@ static int ipa_probe(struct platform_device *pdev) ...@@ -788,12 +788,9 @@ static int ipa_probe(struct platform_device *pdev)
goto err_mem_exit; goto err_mem_exit;
/* Result is a non-zero mask of endpoints that support filtering */ /* Result is zero on success, or a negative error code on failure */
ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count, ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
data->endpoint_data); if (ret)
if (!ipa->filter_map) {
ret = -EINVAL;
goto err_gsi_exit; goto err_gsi_exit;
}
ret = ipa_table_init(ipa); ret = ipa_table_init(ipa);
if (ret) if (ret)
......
...@@ -161,20 +161,20 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6) ...@@ -161,20 +161,20 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
return ipa_mem_find(ipa, mem_id); return ipa_mem_find(ipa, mem_id);
} }
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map) bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{ {
struct device *dev = &ipa->pdev->dev; struct device *dev = &ipa->pdev->dev;
u32 count; u32 count;
if (!filter_map) { if (!filtered) {
dev_err(dev, "at least one filtering endpoint is required\n"); dev_err(dev, "at least one filtering endpoint is required\n");
return false; return false;
} }
count = hweight32(filter_map); count = hweight64(filtered);
if (count > ipa->filter_count) { if (count > ipa->filter_count) {
dev_err(dev, "too many filtering endpoints (%u, max %u)\n", dev_err(dev, "too many filtering endpoints (%u > %u)\n",
count, ipa->filter_count); count, ipa->filter_count);
return false; return false;
...@@ -200,16 +200,17 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) ...@@ -200,16 +200,17 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
} }
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
u16 first, u16 count, enum ipa_mem_id mem_id) bool hashed, bool ipv6, u16 first, u16 count)
{ {
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); const struct ipa_mem *mem;
dma_addr_t addr; dma_addr_t addr;
u32 offset; u32 offset;
u16 size; u16 size;
/* Nothing to do if the table memory region is empty */ /* Nothing to do if the memory region doesn't exist or is empty */
if (!mem->size) mem = ipa_table_mem(ipa, filter, hashed, ipv6);
if (!mem || !mem->size)
return; return;
if (filter) if (filter)
...@@ -227,14 +228,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, ...@@ -227,14 +228,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables. * for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/ */
static int static int
ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem) ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
{ {
u32 ep_mask = ipa->filter_map; u64 ep_mask = ipa->filtered;
u32 count = hweight32(ep_mask);
struct gsi_trans *trans; struct gsi_trans *trans;
enum gsi_ee_id ee_id; enum gsi_ee_id ee_id;
trans = ipa_cmd_trans_alloc(ipa, count); trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) { if (!trans) {
dev_err(&ipa->pdev->dev, dev_err(&ipa->pdev->dev,
"no transaction for %s filter reset\n", "no transaction for %s filter reset\n",
...@@ -253,7 +253,7 @@ ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem) ...@@ -253,7 +253,7 @@ ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
if (endpoint->ee_id != ee_id) if (endpoint->ee_id != ee_id)
continue; continue;
ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id); ipa_table_reset_add(trans, true, hashed, ipv6, endpoint_id, 1);
} }
gsi_trans_commit_wait(trans); gsi_trans_commit_wait(trans);
...@@ -269,18 +269,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem) ...@@ -269,18 +269,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{ {
int ret; int ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem); ret = ipa_filter_reset_table(ipa, false, false, modem);
if (ret) if (ret)
return ret; return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem); ret = ipa_filter_reset_table(ipa, true, false, modem);
if (ret) if (ret)
return ret; return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem); ret = ipa_filter_reset_table(ipa, false, true, modem);
if (ret) if (ret)
return ret; return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem); ret = ipa_filter_reset_table(ipa, true, true, modem);
return ret; return ret;
} }
...@@ -312,13 +312,11 @@ static int ipa_route_reset(struct ipa *ipa, bool modem) ...@@ -312,13 +312,11 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
count = ipa->route_count - modem_route_count; count = ipa->route_count - modem_route_count;
} }
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE); ipa_table_reset_add(trans, false, false, false, first, count);
ipa_table_reset_add(trans, false, first, count, ipa_table_reset_add(trans, false, true, false, first, count);
IPA_MEM_V4_ROUTE_HASHED);
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE); ipa_table_reset_add(trans, false, false, true, first, count);
ipa_table_reset_add(trans, false, first, count, ipa_table_reset_add(trans, false, true, true, first, count);
IPA_MEM_V6_ROUTE_HASHED);
gsi_trans_commit_wait(trans); gsi_trans_commit_wait(trans);
...@@ -376,14 +374,12 @@ int ipa_table_hash_flush(struct ipa *ipa) ...@@ -376,14 +374,12 @@ int ipa_table_hash_flush(struct ipa *ipa)
return 0; return 0;
} }
static void ipa_table_init_add(struct gsi_trans *trans, bool filter, static void ipa_table_init_add(struct gsi_trans *trans, bool filter, bool ipv6)
enum ipa_cmd_opcode opcode,
enum ipa_mem_id mem_id,
enum ipa_mem_id hash_mem_id)
{ {
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id); const struct ipa_mem *hash_mem;
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); enum ipa_cmd_opcode opcode;
const struct ipa_mem *mem;
dma_addr_t hash_addr; dma_addr_t hash_addr;
dma_addr_t addr; dma_addr_t addr;
u32 zero_offset; u32 zero_offset;
...@@ -393,6 +389,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, ...@@ -393,6 +389,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
u16 count; u16 count;
u16 size; u16 size;
opcode = filter ? ipv6 ? IPA_CMD_IP_V6_FILTER_INIT
: IPA_CMD_IP_V4_FILTER_INIT
: ipv6 ? IPA_CMD_IP_V6_ROUTING_INIT
: IPA_CMD_IP_V4_ROUTING_INIT;
mem = ipa_table_mem(ipa, filter, false, ipv6);
hash_mem = ipa_table_mem(ipa, filter, true, ipv6);
/* Compute the number of table entries to initialize */ /* Compute the number of table entries to initialize */
if (filter) { if (filter) {
/* The number of filtering endpoints determines number of /* The number of filtering endpoints determines number of
...@@ -400,14 +404,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, ...@@ -400,14 +404,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* to hold the bitmap itself. The size of the hashed filter * to hold the bitmap itself. The size of the hashed filter
* table is either the same as the non-hashed one, or zero. * table is either the same as the non-hashed one, or zero.
*/ */
count = 1 + hweight32(ipa->filter_map); count = 1 + hweight64(ipa->filtered);
hash_count = hash_mem->size ? count : 0; hash_count = hash_mem && hash_mem->size ? count : 0;
} else { } else {
/* The size of a route table region determines the number /* The size of a route table region determines the number
* of entries it has. * of entries it has.
*/ */
count = mem->size / sizeof(__le64); count = mem->size / sizeof(__le64);
hash_count = hash_mem->size / sizeof(__le64); hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
} }
size = count * sizeof(__le64); size = count * sizeof(__le64);
hash_size = hash_count * sizeof(__le64); hash_size = hash_count * sizeof(__le64);
...@@ -458,17 +462,10 @@ int ipa_table_setup(struct ipa *ipa) ...@@ -458,17 +462,10 @@ int ipa_table_setup(struct ipa *ipa)
return -EBUSY; return -EBUSY;
} }
ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT, ipa_table_init_add(trans, false, false);
IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED); ipa_table_init_add(trans, false, true);
ipa_table_init_add(trans, true, false);
ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT, ipa_table_init_add(trans, true, true);
IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
gsi_trans_commit_wait(trans); gsi_trans_commit_wait(trans);
...@@ -505,7 +502,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint) ...@@ -505,7 +502,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
static void ipa_filter_config(struct ipa *ipa, bool modem) static void ipa_filter_config(struct ipa *ipa, bool modem)
{ {
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP; enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
u32 ep_mask = ipa->filter_map; u64 ep_mask = ipa->filtered;
if (!ipa_table_hash_support(ipa)) if (!ipa_table_hash_support(ipa))
return; return;
...@@ -617,7 +614,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter) ...@@ -617,7 +614,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
/* Filter tables must be able to hold the endpoint bitmap plus /* Filter tables must be able to hold the endpoint bitmap plus
* an entry for each endpoint that supports filtering * an entry for each endpoint that supports filtering
*/ */
if (count < 1 + hweight32(ipa->filter_map)) if (count < 1 + hweight64(ipa->filtered))
return false; return false;
} else { } else {
/* Routing tables must be able to hold all modem entries, /* Routing tables must be able to hold all modem entries,
...@@ -722,9 +719,9 @@ int ipa_table_init(struct ipa *ipa) ...@@ -722,9 +719,9 @@ int ipa_table_init(struct ipa *ipa)
* that option, so there's no shifting required. * that option, so there's no shifting required.
*/ */
if (ipa->version < IPA_VERSION_5_0) if (ipa->version < IPA_VERSION_5_0)
*virt++ = cpu_to_le64((u64)ipa->filter_map << 1); *virt++ = cpu_to_le64(ipa->filtered << 1);
else else
*virt++ = cpu_to_le64((u64)ipa->filter_map); *virt++ = cpu_to_le64(ipa->filtered);
/* All the rest contain the DMA address of the zero rule */ /* All the rest contain the DMA address of the zero rule */
le_addr = cpu_to_le64(addr); le_addr = cpu_to_le64(addr);
......
...@@ -11,13 +11,13 @@ ...@@ -11,13 +11,13 @@
struct ipa; struct ipa;
/** /**
* ipa_filter_map_valid() - Validate a filter table endpoint bitmap * ipa_filtered_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer * @ipa: IPA pointer
* @filter_mask: Filter table endpoint bitmap to check * @filtered: Filter table endpoint bitmap to check
* *
* Return: true if all regions are valid, false otherwise * Return: true if all regions are valid, false otherwise
*/ */
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask); bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
/** /**
* ipa_table_hash_support() - Return true if hashed tables are supported * ipa_table_hash_support() - Return true if hashed tables are supported
......
...@@ -103,7 +103,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -103,7 +103,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0); IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
...@@ -116,7 +116,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -116,7 +116,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = { static const u32 ipa_reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0), [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
...@@ -386,13 +386,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -386,13 +386,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
...@@ -108,7 +108,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -108,7 +108,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0); IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
...@@ -121,7 +121,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -121,7 +121,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = { static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */ /* Bits 0-3 reserved */
...@@ -397,13 +397,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -397,13 +397,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
...@@ -140,7 +140,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -140,7 +140,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0), [IPA_BASE_ADDR] = GENMASK(17, 0),
...@@ -151,7 +151,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -151,7 +151,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = { static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */ /* Bits 0-1 reserved */
...@@ -453,13 +453,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -453,13 +453,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
...@@ -132,7 +132,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -132,7 +132,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0); IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
...@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = { static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */ /* Bits 0-3 reserved */
...@@ -399,13 +399,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -399,13 +399,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
...@@ -134,7 +134,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -134,7 +134,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0), [IPA_BASE_ADDR] = GENMASK(17, 0),
...@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = { static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */ /* Bits 0-1 reserved */
...@@ -472,13 +472,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -472,13 +472,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
...@@ -139,7 +139,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = { ...@@ -139,7 +139,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c); IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4); IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0), [IPA_BASE_ADDR] = GENMASK(17, 0),
...@@ -150,7 +150,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = { ...@@ -150,7 +150,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8); IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec); IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = { static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */ /* Bits 0-1 reserved */
...@@ -450,13 +450,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = { ...@@ -450,13 +450,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP); IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */ /* Valid bits defined by ipa->available */
IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP); IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = { static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg, [COMP_CFG] = &ipa_reg_comp_cfg,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment