Commit 156a532b authored by Jakub Kicinski

Merge branch 'net-ipa-support-variable-rx-buffer-size'

Alex Elder says:

====================
net: ipa: support variable RX buffer size

Specify the size of receive buffers used for RX endpoints in the
configuration data, rather than using 8192 bytes for all of them.
Increase the size of the AP receive buffer for the modem to 32KB.
====================

Link: https://lore.kernel.org/r/20220201153737.601149-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 52dae93f 33230aeb
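
For orientation only (not part of the patch): a minimal standalone C sketch of the size check this series adds to ipa_endpoint_data_valid_one(). The helper name and the EXAMPLE_* constants are illustrative stand-ins for IPA_MTU and IPA_RX_BUFFER_OVERHEAD, not driver code.

/* Illustrative sketch only -- not from the patch.  It restates the rule the
 * new validation enforces: an AP RX buffer must hold a full MTU plus skb
 * overhead, and an aggregating endpoint's buffer must also fit within what
 * the KB-granular aggregation byte-limit field can represent.
 */
#include <stdbool.h>
#include <stdint.h>

#define SZ_1K			1024u
#define EXAMPLE_IPA_MTU		1500u	/* stand-in for IPA_MTU */
#define EXAMPLE_RX_OVERHEAD	320u	/* stand-in for IPA_RX_BUFFER_OVERHEAD */

static bool rx_buffer_size_ok(uint32_t buffer_size, bool aggregation,
			      uint32_t aggr_byte_limit_max_kb)
{
	uint32_t limit = EXAMPLE_IPA_MTU + EXAMPLE_RX_OVERHEAD;

	if (buffer_size < limit)
		return false;	/* too small to hold one MTU plus overhead */

	if (aggregation && buffer_size > limit + SZ_1K * aggr_byte_limit_max_kb)
		return false;	/* not representable in the aggregation byte-limit field */

	return true;
}

With these stand-in values, both the 8192-byte buffers and the 32768-byte modem buffer pass whenever the aggregation field maximum is at least 31 KB.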
@@ -101,6 +101,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -148,6 +149,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 8192,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -92,6 +92,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -140,6 +141,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 8192,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -86,6 +86,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -133,6 +134,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 32768,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -82,6 +82,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -130,6 +131,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 8192,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -95,6 +95,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 8192,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -87,6 +87,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation = true,
 			.status_enable = true,
 			.rx = {
+				.buffer_size = 8192,
 				.pad_align = ilog2(sizeof(u32)),
 			},
 		},
@@ -134,6 +135,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap = true,
 			.aggregation = true,
 			.rx = {
+				.buffer_size = 8192,
 				.aggr_close_eof = true,
 			},
 		},
...
@@ -112,6 +112,7 @@ struct ipa_endpoint_tx_data {
 /**
  * struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
  * @pad_align: power-of-2 boundary to which packet payload is aligned
  * @aggr_close_eof: whether aggregation closes on end-of-frame
  *
@@ -125,6 +126,7 @@ struct ipa_endpoint_tx_data {
  * a "frame" consisting of several transfers has ended.
  */
 struct ipa_endpoint_rx_data {
+	u32 buffer_size;
 	u32 pad_align;
 	bool aggr_close_eof;
 };
...
@@ -27,9 +27,6 @@
 #define IPA_REPLENISH_BATCH 16
 
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE 8192	/* PAGE_SIZE > 4096 wastes a LOT */
-
 /* The amount of RX buffer space consumed by standard skb overhead */
 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
 
@@ -75,6 +72,14 @@ struct ipa_status {
 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
 
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+	if (version < IPA_VERSION_4_5)
+		return field_max(aggr_byte_limit_fmask(true));
+
+	return field_max(aggr_byte_limit_fmask(false));
+}
+
 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			    const struct ipa_gsi_endpoint_data *all_data,
 			    const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +92,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 		return true;
 
 	if (!data->toward_ipa) {
+		u32 buffer_size;
+		u32 limit;
+
 		if (data->endpoint.filter_support) {
 			dev_err(dev, "filtering not supported for "
 					"RX endpoint %u\n",
@@ -94,6 +102,41 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			return false;
 		}
 
+		/* Nothing more to check for non-AP RX */
+		if (data->ee_id != GSI_EE_AP)
+			return true;
+
+		buffer_size = data->endpoint.config.rx.buffer_size;
+
+		/* The buffer size must hold an MTU plus overhead */
+		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+		if (buffer_size < limit) {
+			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+				data->endpoint_id, buffer_size, limit);
+			return false;
+		}
+
+		/* For an endpoint supporting receive aggregation, the
+		 * aggregation byte limit defines the point at which an
+		 * aggregation window will close.  It is programmed into the
+		 * IPA hardware as a number of KB.  We don't use "hard byte
+		 * limit" aggregation, so we need to supply enough space in
+		 * a receive buffer to hold a complete MTU plus normal skb
+		 * overhead *after* that aggregation byte limit has been
+		 * crossed.
+		 *
+		 * This check just ensures the receive buffer size doesn't
+		 * exceed what's representable in the aggregation limit field.
+		 */
+		if (data->endpoint.config.aggregation) {
+			limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+			if (buffer_size > limit) {
+				dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+					data->endpoint_id, buffer_size, limit);
+				return false;
+			}
+		}
+
 		return true;	/* Nothing more to check for RX */
 	}
 
@@ -156,21 +199,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 	return true;
 }
 
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
-	if (version < IPA_VERSION_4_5)
-		return field_max(aggr_byte_limit_fmask(true));
-
-	return field_max(aggr_byte_limit_fmask(false));
-}
-
 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 				    const struct ipa_gsi_endpoint_data *data)
 {
 	const struct ipa_gsi_endpoint_data *dp = data;
 	struct device *dev = &ipa->pdev->dev;
 	enum ipa_endpoint_name name;
-	u32 limit;
 
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +212,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 		return false;
 	}
 
-	/* The aggregation byte limit defines the point at which an
-	 * aggregation window will close.  It is programmed into the
-	 * IPA hardware as a number of KB.  We don't use "hard byte
-	 * limit" aggregation, which means that we need to supply
-	 * enough space in a receive buffer to hold a complete MTU
-	 * plus normal skb overhead *after* that aggregation byte
-	 * limit has been crossed.
-	 *
-	 * This check ensures we don't define a receive buffer size
-	 * that would exceed what we can represent in the field that
-	 * is used to program its size.
-	 */
-	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
-	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
-	if (limit < IPA_RX_BUFFER_SIZE) {
-		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
-			IPA_RX_BUFFER_SIZE, limit);
-		return false;
-	}
-
 	/* Make sure needed endpoints have defined data */
 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 		dev_err(dev, "command TX endpoint not defined\n");
@@ -723,13 +737,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 	if (endpoint->data->aggregation) {
 		if (!endpoint->toward_ipa) {
+			const struct ipa_endpoint_rx_data *rx_data;
 			bool close_eof;
 			u32 limit;
 
+			rx_data = &endpoint->data->rx;
 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 
-			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+			limit = ipa_aggr_size_kb(rx_data->buffer_size);
 			val |= aggr_byte_limit_encoded(version, limit);
 
 			limit = IPA_AGGR_TIME_LIMIT;
@@ -737,7 +753,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 
-			close_eof = endpoint->data->rx.aggr_close_eof;
+			close_eof = rx_data->aggr_close_eof;
 			val |= aggr_sw_eof_active_encoded(version, close_eof);
 
 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -1025,11 +1041,13 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 	struct gsi_trans *trans;
 	bool doorbell = false;
 	struct page *page;
+	u32 buffer_size;
 	u32 offset;
 	u32 len;
 	int ret;
 
-	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+	buffer_size = endpoint->data->rx.buffer_size;
+	page = dev_alloc_pages(get_order(buffer_size));
 	if (!page)
 		return -ENOMEM;
 
@@ -1039,7 +1057,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 	/* Offset the buffer to make space for skb headroom */
 	offset = NET_SKB_PAD;
-	len = IPA_RX_BUFFER_SIZE - offset;
+	len = buffer_size - offset;
 
 	ret = gsi_trans_page_add(trans, page, len, offset);
 	if (ret)
@@ -1058,7 +1076,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 err_trans_free:
 	gsi_trans_free(trans);
 err_free_pages:
-	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+	__free_pages(page, get_order(buffer_size));
 
 	return -ENOMEM;
 }
@@ -1183,15 +1201,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
 				   struct page *page, u32 len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	struct sk_buff *skb;
 
 	/* Nothing to do if there's no netdev */
 	if (!endpoint->netdev)
 		return false;
 
-	WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
 
-	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+	skb = build_skb(page_address(page), buffer_size);
 	if (skb) {
 		/* Reserve the headroom and account for the data */
 		skb_reserve(skb, NET_SKB_PAD);
@@ -1289,8 +1308,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
 				      struct page *page, u32 total_len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	void *data = page_address(page) + NET_SKB_PAD;
-	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+	u32 unused = buffer_size - total_len;
 	u32 resid = total_len;
 
 	while (resid) {
@@ -1398,8 +1418,11 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
 	} else {
 		struct page *page = trans->data;
 
-		if (page)
-			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+		if (page) {
+			u32 buffer_size = endpoint->data->rx.buffer_size;
+
+			__free_pages(page, get_order(buffer_size));
+		}
 	}
 }
...
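
Aside, also not part of the patch: a small self-contained sketch of the buffer-layout arithmetic behind IPA_RX_BUFFER_OVERHEAD, which the replenish and skb-build paths above rely on. EXAMPLE_PAGE_SIZE, EXAMPLE_NET_SKB_PAD, and EXAMPLE_SHARED_INFO are typical stand-in values, not kernel constants.

/* Sketch only.  IPA_RX_BUFFER_OVERHEAD is PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0),
 * i.e. the skb headroom reserved at the front of the buffer plus the
 * skb_shared_info that build_skb() keeps at its tail.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_NET_SKB_PAD	64u	/* typical; arch-dependent in the kernel */
#define EXAMPLE_SHARED_INFO	320u	/* stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

int main(void)
{
	/* SKB_MAX_ORDER(NET_SKB_PAD, 0) expanded with the stand-ins above */
	uint32_t skb_max_order_0 = (EXAMPLE_PAGE_SIZE - EXAMPLE_NET_SKB_PAD) - EXAMPLE_SHARED_INFO;
	uint32_t overhead = EXAMPLE_PAGE_SIZE - skb_max_order_0;	/* headroom + shared info */
	uint32_t buffer_size = 8192;	/* or 32768 for the modem RX endpoint */

	/* Payload bytes one RX buffer can carry after headroom and tail overhead */
	printf("usable payload: %u of %u bytes\n", buffer_size - overhead, buffer_size);
	return 0;
}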