Commit 2739bd76 authored by David S. Miller

Merge branch 'ipa-kill-validation'

Alex Elder says:

====================
net: ipa: kill IPA_VALIDATION

A few months ago I proposed cleaning up some code that validates
certain things conditionally, arguing that validating once is enough,
so validating on every call should not be necessary.
  https://lore.kernel.org/netdev/20210320141729.1956732-1-elder@linaro.org/
Leon Romanovsky felt strongly that this was a mistake, and in the
end I agreed to change my plans.

This series finally completes what I said I would do about this,
ultimately eliminating the IPA_VALIDATION symbol and conditional
code entirely.

The first patch extends and simplifies some of the validation done
for IPA immediate commands, and makes those checks unconditional.

The second patch fixes a bug that normally went undetected because
of the conditional compilation (one reason Leon was right about this).
It makes filter and routing table validation occur unconditionally.

The third patch eliminates the remaining conditionally-defined code
and removes the Makefile line used to enable validation (a brief
sketch of that mechanism follows the commit metadata below).

And the fourth removes the comments containing assert()/ipa_assert()
statements, replacing most of them with WARN_ON() calls (an
illustrative before/after sketch follows this message).
====================
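As an illustration of the conversion described in the last patch, here is a
minimal before/after sketch. The names (example_ring, example_ring_alloc_before,
example_ring_alloc_after) are hypothetical and do not come from the driver;
WARN_ON() is the kernel macro from <linux/bug.h>, which evaluates its condition
and, when the condition is true, prints a warning with a stack trace while
letting execution continue.

#include <linux/bug.h>
#include <linux/types.h>

struct example_ring {
	u32 count;	/* number of slots in the ring */
	u32 used;	/* slots currently handed out */
};

/* Before: the precondition lived only in a comment, optionally backed
 * by a check that was compiled in only when IPA_VALIDATE was defined.
 */
static u32 example_ring_alloc_before(struct example_ring *ring)
{
	/* assert(ring->used < ring->count); */
	return ring->used++;
}

/* After: the check is always compiled in.  Where a failure can be
 * handled, the patches test the result instead, e.g.
 * "if (WARN_ON(cond)) return -EINVAL;" as in gsi_trans_page_add().
 */
static u32 example_ring_alloc_after(struct example_ring *ring)
{
	WARN_ON(ring->used >= ring->count);
	return ring->used++;
}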
Signed-off-by: David S. Miller <davem@davemloft.net>
parents beeee08c 5bc55884
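The diff content begins below with the driver Makefile, from which the
commented-out line that used to enable validation is removed. For context,
here is a rough sketch of the kbuild mechanism involved, using a hypothetical
example_pool_init() whose checks mirror the gsi_trans_pool_init() hunks
further down: a -D flag added through ccflags-y defines the macro for every
object built from that Makefile, so the guarded checks are compiled only
while the line is uncommented.

/* Hypothetical sketch of the pattern this series removes.  With
 *	ccflags-y += -DIPA_VALIDATE
 * in the directory's Makefile, IPA_VALIDATE is defined for all objects
 * built there and the block below is compiled; with the line commented
 * out (the default), the checks vanish entirely.
 */
#include <linux/errno.h>
#include <linux/types.h>

static int example_pool_init(size_t size, u32 count, u32 max_alloc)
{
#ifdef IPA_VALIDATE
	if (!size || !max_alloc || count < max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* ...normal pool initialization would follow... */
	return 0;
}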
# Un-comment the next line if you want to validate configuration data
#ccflags-y += -DIPA_VALIDATE
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
......
@@ -1964,7 +1964,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2010,7 +2009,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
channel_id, data->channel.event_count);
return false;
}
#endif /* IPA_VALIDATION */
return true;
}
......
@@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
{
void *virt;
#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
#endif /* IPA_VALIDATE */
/* By allocating a few extra entries in our pool (one less
* than the maximum number that will be requested in a
@@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
dma_addr_t addr;
void *virt;
#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
#endif /* IPA_VALIDATE */
/* Don't let allocations cross a power-of-two boundary */
size = __roundup_pow_of_two(size);
@@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
u32 offset;
/* assert(count > 0); */
/* assert(count <= pool->max_alloc); */
WARN_ON(!count);
WARN_ON(count > pool->max_alloc);
/* Allocate from beginning if wrap would occur */
if (count > pool->count - pool->free)
@@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
void *end = pool->base + pool->count * pool->size;
/* assert(element >= pool->base); */
/* assert(element < end); */
/* assert(pool->max_alloc == 1); */
WARN_ON(element < pool->base);
WARN_ON(element >= end);
WARN_ON(pool->max_alloc != 1);
element += pool->size;
return element < end ? element : pool->base;
@@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
return NULL;
trans_info = &channel->trans_info;
@@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
u32 which = trans->used++;
struct scatterlist *sg;
/* assert(which < trans->tre_count); */
WARN_ON(which >= trans->tre_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
/* assert(trans->tre_count == 1); */
/* assert(!trans->used); */
if (WARN_ON(trans->tre_count != 1))
return -EINVAL;
if (WARN_ON(trans->used))
return -EINVAL;
sg_set_page(sg, page, size, offset);
ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
@@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
u32 used;
int ret;
/* assert(trans->tre_count == 1); */
/* assert(!trans->used); */
if (WARN_ON(trans->tre_count != 1))
return -EINVAL;
if (WARN_ON(trans->used))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
u32 avail;
u32 i;
/* assert(trans->used > 0); */
WARN_ON(!trans->used);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
......
@@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}
#ifdef IPA_VALIDATE
/* Hashed and non-hashed fields are assumed to be the same size */
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
/* Valid endpoint numbers must fit in the IP packet init command */
BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
IPA_ENDPOINT_MAX - 1);
}
/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route, bool ipv6, bool hashed)
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
u32 offset_max;
offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
: field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
/* Size must fit in the immediate command field that holds it */
if (mem->size > size_max) {
dev_err(dev, "%s table region size too large\n", table);
dev_err(dev, " (0x%04x > 0x%04x)\n",
mem->size, size_max);
return false;
}
/* Offset must fit in the immediate command field that holds it */
if (mem->offset > offset_max ||
ipa->mem_offset > offset_max - mem->offset) {
dev_err(dev, "IPv%c %s%s table region offset too large\n",
ipv6 ? '6' : '4', hashed ? "hashed " : "",
route ? "route" : "filter");
dev_err(dev, "%s table region offset too large\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, mem->offset, offset_max);
return false;
}
/* Entire memory range must fit within IPA-local memory */
if (mem->offset > ipa->mem_size ||
mem->size > ipa->mem_size - mem->offset) {
dev_err(dev, "IPv%c %s%s table region out of range\n",
ipv6 ? '6' : '4', hashed ? "hashed " : "",
route ? "route" : "filter");
dev_err(dev, "%s table region out of range\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
mem->offset, mem->size, ipa->mem_size);
@@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa)
return true;
}
#endif /* IPA_VALIDATE */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
@@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
/* assert(endpoint_id <
field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
@@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
u16 flags;
/* size and offset must fit in 16 bit fields */
/* assert(size > 0 && size <= U16_MAX); */
/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
WARN_ON(!size);
WARN_ON(size > U16_MAX);
WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);
offset += ipa->mem_offset;
@@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
......
@@ -57,20 +57,16 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
#ifdef IPA_VALIDATE
/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
* @ipv6: - Whether the table is for IPv6 or IPv4
* @hashed: - Whether the table is hashed or non-hashed
*
* Return: true if region is valid, false otherwise
*/
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route, bool ipv6, bool hashed);
bool route);
/**
* ipa_cmd_data_valid() - Validate command-related configuration is valid
@@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
*/
bool ipa_cmd_data_valid(struct ipa *ipa);
#else /* !IPA_VALIDATE */
static inline bool ipa_cmd_table_valid(struct ipa *ipa,
const struct ipa_mem *mem, bool route,
bool ipv6, bool hashed)
{
return true;
}
static inline bool ipa_cmd_data_valid(struct ipa *ipa)
{
return true;
}
#endif /* !IPA_VALIDATE */
/**
* ipa_cmd_pool_init() - initialize command channel pools
* @channel: AP->IPA command TX GSI channel pointer
......
@@ -250,17 +250,18 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
/* Suspend is not supported for IPA v4.0+. Delay doesn't work
* correctly on IPA v4.2.
*
* if (endpoint->toward_ipa)
* assert(ipa->version != IPA_VERSION_4.2);
* else
* assert(ipa->version < IPA_VERSION_4_0);
*/
if (endpoint->toward_ipa)
WARN_ON(ipa->version == IPA_VERSION_4_2);
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
/* Don't bother if it's already in the requested state */
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
if (suspend_delay != state) {
val ^= mask;
iowrite32(val, ipa->reg_virt + offset);
@@ -273,7 +274,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
/* assert(endpoint->toward_ipa); */
WARN_ON(!endpoint->toward_ipa);
/* Delay mode doesn't work properly for IPA v4.2 */
if (endpoint->ipa->version != IPA_VERSION_4_2)
@@ -287,7 +288,8 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 offset;
u32 val;
/* assert(mask & ipa->available); */
WARN_ON(!(mask & ipa->available));
offset = ipa_reg_state_aggr_active_offset(ipa->version);
val = ioread32(ipa->reg_virt + offset);
@@ -299,7 +301,8 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
/* assert(mask & ipa->available); */
WARN_ON(!(mask & ipa->available));
iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
@@ -338,7 +341,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
if (endpoint->ipa->version >= IPA_VERSION_4_0)
return enable; /* For IPA v4.0+, no change made */
/* assert(!endpoint->toward_ipa); */
WARN_ON(endpoint->toward_ipa);
suspended = ipa_endpoint_init_ctrl(endpoint, enable);
@@ -1156,7 +1159,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
if (!endpoint->netdev)
return false;
/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
if (skb) {
/* Reserve the headroom and account for the data */
......
@@ -146,7 +146,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 offset;
u32 val;
/* assert(mask & ipa->available); */
WARN_ON(!(mask & ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
@@ -206,7 +206,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 offset;
/* assert(ipa_irq < IPA_IRQ_COUNT); */
WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
@@ -222,7 +223,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
struct ipa *ipa = interrupt->ipa;
u32 offset;
/* assert(ipa_irq < IPA_IRQ_COUNT); */
WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
offset = ipa_reg_irq_en_offset(ipa->version);
......
@@ -253,9 +253,6 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
const struct ipa_qsb_data *data1;
u32 val;
/* assert(data->qsb_count > 0); */
/* assert(data->qsb_count < 3); */
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
if (data->qsb_count > 1)
@@ -293,7 +290,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
*/
static u32 ipa_aggr_granularity_val(u32 usec)
{
/* assert(usec != 0); */
WARN_ON(!usec);
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
@@ -612,7 +609,6 @@ MODULE_DEVICE_TABLE(of, ipa_match);
* */
static void ipa_validate_build(void)
{
#ifdef IPA_VALIDATE
/* At one time we assumed a 64-bit build, allowing some do_div()
* calls to be replaced by simple division or modulo operations.
* We currently only perform divide and modulo operations on u32,
@@ -646,7 +642,6 @@ static void ipa_validate_build(void)
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
field_max(AGGR_GRANULARITY_FMASK));
#endif /* IPA_VALIDATE */
}
static bool ipa_version_valid(enum ipa_version version)
......
@@ -99,7 +99,7 @@ struct ipa;
static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
u32 mask)
{
/* assert(version >= IPA_VERSION_4_0); */
WARN_ON(version < IPA_VERSION_4_0);
if (version < IPA_VERSION_4_9)
return u32_encode_bits(mask, GENMASK(20, 17));
@@ -116,7 +116,7 @@ static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
{
u32 val = enable ? 1 : 0;
/* assert(version >= IPA_VERSION_4_5); */
WARN_ON(version < IPA_VERSION_4_5);
if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
return u32_encode_bits(val, GENMASK(21, 21));
@@ -409,7 +409,7 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
val = u32_encode_bits(size, HDR_LEN_FMASK);
if (version < IPA_VERSION_4_5) {
/* ipa_assert(header_size == size); */
WARN_ON(header_size != size);
return val;
}
@@ -429,7 +429,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
if (version < IPA_VERSION_4_5) {
/* ipa_assert(offset == off); */
WARN_ON(offset != off);
return val;
}
@@ -812,7 +812,7 @@ ipa_reg_irq_suspend_info_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
{
/* assert(version != IPA_VERSION_3_0); */
WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003034 + 0x1000 * ee;
@@ -830,7 +830,7 @@ ipa_reg_irq_suspend_en_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
{
/* assert(version != IPA_VERSION_3_0); */
WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003038 + 0x1000 * ee;
......
@@ -29,7 +29,6 @@
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
#ifdef IPA_VALIDATION
u32 group_count;
u32 i;
u32 j;
@@ -65,7 +64,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
#endif /* !IPA_VALIDATION */
return true;
}
......
@@ -120,8 +120,6 @@
*/
#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
#ifdef IPA_VALIDATE
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
@@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
if (!ipa_cmd_table_valid(ipa, mem, route))
return false;
/* mem->size >= size is sufficient, but we'll demand more */
@@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
return true;
/* Hashed table regions can be zero size if hashing is not supported */
if (hashed && !mem->size)
if (ipa_table_hash_support(ipa) && !mem->size)
return true;
dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
@@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa)
{
bool valid;
valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false);
valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false);
valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false);
valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false);
valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true);
valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true);
valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true);
valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true);
valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
if (!ipa_table_hash_support(ipa))
return valid;
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
true);
return valid;
}
@@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
return true;
}
#else /* !IPA_VALIDATE */
static void ipa_table_validate_build(void)
{
}
#endif /* !IPA_VALIDATE */
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -233,7 +231,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
if (!count)
return 0;
/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
/* Skip over the zero rule and possibly the filter mask */
skip = filter_mask ? 1 : 2;
......
@@ -16,8 +16,6 @@ struct ipa;
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
#ifdef IPA_VALIDATE
/**
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
@@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa);
*/
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
#else /* !IPA_VALIDATE */
static inline bool ipa_table_valid(struct ipa *ipa)
{
return true;
}
static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
{
return true;
}
#endif /* !IPA_VALIDATE */
/**
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
......