Commit 4dc84c06 authored by Jie Wang, committed by Jakub Kicinski

net: ethtool: extend ringparam set/get APIs for tx_push

Currently, TX push is a standard driver feature that controls use of a fast
path descriptor push. This patch therefore extends the ringparam APIs and
data structures so that TX push can be set/get via ethtool -G/-g.

Signed-off-by: Jie Wang <wangjie125@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 0a03f3c5
@@ -862,6 +862,7 @@ Kernel response contents:

  ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32    size of buffers on the ring
  ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8     TCP header / data split
  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32    Size of TX/RX CQE
  ``ETHTOOL_A_RINGS_TX_PUSH``           u8     flag of TX Push mode
  ==================================== ====== ===========================

``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with

@@ -871,6 +872,12 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.
``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable the descriptor fast path for
sending packets. In the ordinary path, the driver fills descriptors in DRAM and
notifies the NIC hardware. In the fast path, the driver pushes descriptors to
the device through MMIO writes, thus reducing latency. However, enabling this
feature may increase CPU cost. Drivers may enforce additional per-packet
eligibility checks (e.g. on packet size).
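
With a driver that opts in, the mode can be toggled from userspace with
``ethtool -G <dev> tx-push on|off`` and read back with ``ethtool -g <dev>``
(assuming an ethtool binary recent enough to know the attribute). As a rough
illustration of the two paths described above, here is a minimal sketch of a
transmit routine honoring the flag; my_ring, my_desc, push_reg, db_reg and
MY_TX_PUSH_MAX_LEN are hypothetical names, not part of this patch:

/* Hypothetical transmit path: push small descriptors over MMIO when
 * tx_push is enabled, otherwise fall back to the doorbell path.
 */
static void my_tx_submit(struct my_ring *ring, struct my_desc *desc,
			 unsigned int len)
{
	if (ring->tx_push && len <= MY_TX_PUSH_MAX_LEN) {
		/* Fast path: copy the descriptor into the device's MMIO
		 * push window so the NIC need not DMA-fetch it from DRAM.
		 */
		__iowrite64_copy(ring->push_reg, desc,
				 sizeof(*desc) / sizeof(u64));
	} else {
		/* Ordinary path: the descriptor stays in DRAM; ring the
		 * doorbell with the new tail index so the NIC fetches it.
		 */
		writel(ring->next_to_use, ring->db_reg);
	}
}

The length check mirrors the eligibility caveat above: pushing large
descriptors over MMIO can burn more CPU than letting the NIC DMA them.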
RINGS_SET
=========

@@ -887,6 +894,7 @@ Request contents:

  ``ETHTOOL_A_RINGS_TX``                u32    size of TX ring
  ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32    size of buffers on the ring
  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32    Size of TX/RX CQE
  ``ETHTOOL_A_RINGS_TX_PUSH``           u8     flag of TX Push mode
  ==================================== ====== ===========================

Kernel checks that requested ring sizes do not exceed limits reported by
@@ -71,11 +71,13 @@ enum {
 * struct kernel_ethtool_ringparam - RX/TX ring configuration
 * @rx_buf_len: Current length of buffers on the rx ring.
 * @tcp_data_split: Scatter packet headers and data to separate buffers
 * @tx_push: The flag of tx push mode
 * @cqe_size: Size of TX/RX completion queue event
 */
struct kernel_ethtool_ringparam {
	u32	rx_buf_len;
	u8	tcp_data_split;
	u8	tx_push;
	u32	cqe_size;
};
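
For context, a driver that implements the feature reports the current state
through the new field from its .get_ringparam callback. A minimal sketch,
assuming a hypothetical mydrv driver with a priv->tx_push_enabled flag:

/* Illustrative only -- the mydrv_* names are not from this patch. */
struct mydrv_priv {
	u32 rx_ring_size;
	u32 tx_ring_size;
	u8  tx_push_enabled;	/* 0 or 1 */
};

static void mydrv_get_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct mydrv_priv *priv = netdev_priv(ndev);

	ring->rx_pending = priv->rx_ring_size;
	ring->tx_pending = priv->tx_ring_size;
	/* Surfaced to userspace as ETHTOOL_A_RINGS_TX_PUSH. */
	kernel_ring->tx_push = priv->tx_push_enabled;
}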
@@ -83,10 +85,12 @@ struct kernel_ethtool_ringparam {
 * enum ethtool_supported_ring_param - indicator caps for setting ring params
 * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
 * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
 * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
 */
enum ethtool_supported_ring_param {
	ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
	ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
	ETHTOOL_RING_USE_TX_PUSH    = BIT(2),
};
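
On the set side, a driver opts in by advertising the capability bit in its
ethtool_ops and applying the validated value in .set_ringparam. Continuing the
hypothetical mydrv sketch:

static int mydrv_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mydrv_priv *priv = netdev_priv(ndev);

	/* The netlink policy already restricted tx_push to 0 or 1. */
	priv->tx_push_enabled = kernel_ring->tx_push;
	/* ... resize rings / reprogram hardware as needed ... */
	return 0;
}

static const struct ethtool_ops mydrv_ethtool_ops = {
	/* Without this bit the core rejects ETHTOOL_A_RINGS_TX_PUSH. */
	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
	.get_ringparam		= mydrv_get_ringparam,
	.set_ringparam		= mydrv_set_ringparam,
};

If the bit is absent, ethnl_set_rings() fails the request with -EOPNOTSUPP,
as the last hunk of this patch shows.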
#define __ETH_RSS_HASH_BIT(bit)	((u32)1 << (bit))
@@ -338,6 +338,7 @@ enum {
	ETHTOOL_A_RINGS_RX_BUF_LEN,		/* u32 */
	ETHTOOL_A_RINGS_TCP_DATA_SPLIT,		/* u8 */
	ETHTOOL_A_RINGS_CQE_SIZE,		/* u32 */
	ETHTOOL_A_RINGS_TX_PUSH,		/* u8 */

	/* add new constants above here */
	__ETHTOOL_A_RINGS_CNT,
@@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
@@ -55,7 +55,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8));	/* _RINGS_TX_PUSH */
}

static int rings_fill_reply(struct sk_buff *skb,
@@ -94,7 +95,8 @@ static int rings_fill_reply(struct sk_buff *skb,
	    (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
		return -EMSGSIZE;

	return 0;
@@ -123,6 +125,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
};

int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
@@ -149,6 +152,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
	if (!ops->get_ringparam || !ops->set_ringparam)
		goto out_dev;

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		ret = -EOPNOTSUPP;
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		goto out_dev;
	}

	rtnl_lock();
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
@@ -165,6 +177,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
			  tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);

	ret = 0;
	if (!mod)
		goto out_ops;