Commit 36d8afbb authored by Jakub Kicinski

Merge branch 'idpf-add-get-set-for-ethtool-s-header-split-ringparam'

Alexander Lobakin says:

====================
idpf: add get/set for Ethtool's header split ringparam

Currently, the header split feature (putting the headers in one small
buffer and the data in a separate, bigger one) is always enabled in
idpf when supported.
One may want frames not to be fragmented into several buffers, for
example, to avoid XDP frags. To better optimize setups for particular
workloads, add the ability to switch the header split state on and off
via Ethtool's ringparams, as well as to query the current state.
The Ethtool Netlink interface currently only supports querying this
parameter, so add the SET counterpart first. I suspect idpf is not the
only driver supporting this.
====================

Link: https://lore.kernel.org/r/20231212142752.935000-1-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 173b6d1c 9b1aa3ef
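
With the series applied, the state can be inspected and flipped from
user space through the standard ringparam interface. A usage sketch,
assuming an ethtool binary new enough to know the tcp-data-split
ringparam (the interface name is illustrative):

	# query the current header split state
	ethtool -g eth0
	# put each frame back into a single buffer, e.g. to avoid XDP frags
	ethtool -G eth0 tcp-data-split off
	# restore the default
	ethtool -G eth0 tcp-data-split on
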
@@ -15,7 +15,7 @@ struct idpf_vport_max_q;
 #include <linux/pci.h>
 #include <linux/bitfield.h>
 #include <linux/sctp.h>
-#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
 #include <net/gro.h>
 #include <linux/dim.h>
@@ -418,11 +418,13 @@ struct idpf_vport {
 /**
  * enum idpf_user_flags
+ * @__IDPF_USER_FLAG_HSPLIT: header split state
  * @__IDPF_PROMISC_UC: Unicast promiscuous mode
  * @__IDPF_PROMISC_MC: Multicast promiscuous mode
  * @__IDPF_USER_FLAGS_NBITS: Must be last
  */
 enum idpf_user_flags {
+	__IDPF_USER_FLAG_HSPLIT = 0U,
 	__IDPF_PROMISC_UC = 32,
 	__IDPF_PROMISC_MC,
@@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
+
 #endif /* !_IDPF_H_ */
@@ -320,6 +320,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
 	ring->rx_pending = vport->rxq_desc_count;
 	ring->tx_pending = vport->txq_desc_count;
 
+	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
+
 	idpf_vport_ctrl_unlock(netdev);
 }
@@ -379,6 +381,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
 	    new_rx_count == vport->rxq_desc_count)
 		goto unlock_mutex;
 
+	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
+		NL_SET_ERR_MSG_MOD(ext_ack,
+				   "setting TCP data split is not supported");
+		err = -EOPNOTSUPP;
+		goto unlock_mutex;
+	}
+
 	config_data = &vport->adapter->vport_config[idx]->user_config;
 	config_data->num_req_txq_desc = new_tx_count;
 	config_data->num_req_rxq_desc = new_rx_count;
@@ -1334,6 +1344,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
 static const struct ethtool_ops idpf_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE,
+	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
 	.get_msglevel = idpf_get_msglevel,
 	.set_msglevel = idpf_set_msglevel,
 	.get_link = ethtool_op_get_link,
@@ -1057,6 +1057,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
 	adapter->next_vport = idpf_get_free_slot(adapter);
 }
 
+/**
+ * idpf_is_hsplit_supported - check whether the header split is supported
+ * @vport: virtual port to check the capability for
+ *
+ * Return: true if it's supported by the HW/FW, false if not.
+ */
+static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
+{
+	return idpf_is_queue_model_split(vport->rxq_model) &&
+	       idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
+				   IDPF_CAP_HSPLIT);
+}
+
+/**
+ * idpf_vport_get_hsplit - get the current header split feature state
+ * @vport: virtual port to query the state for
+ *
+ * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
+ *	   ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
+ *	   ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
+ */
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
+{
+	const struct idpf_vport_user_config_data *config;
+
+	if (!idpf_is_hsplit_supported(vport))
+		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+	config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+	return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
+	       ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+	       ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+/**
+ * idpf_vport_set_hsplit - enable or disable header split on a given vport
+ * @vport: virtual port to configure
+ * @val: Ethtool flag controlling the header split state
+ *
+ * Return: true on success, false if not supported by the HW.
+ */
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
+{
+	struct idpf_vport_user_config_data *config;
+
+	if (!idpf_is_hsplit_supported(vport))
+		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+	config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+	switch (val) {
+	case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
+		/* Default is to enable */
+	case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
+		__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+		return true;
+	case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
+		__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+		return true;
+	default:
+		return false;
+	}
+}
+
 /**
  * idpf_vport_alloc - Allocates the next available struct vport in the adapter
  * @adapter: board private structure
@@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
 	struct idpf_adapter *adapter = vport->adapter;
 	struct idpf_queue *q;
 	int i, k, err = 0;
+	bool hs;
 
 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
 	if (!vport->rxq_grps)
 		return -ENOMEM;
 
+	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
 	for (i = 0; i < vport->num_rxq_grp; i++) {
 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
 		int j;
@@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
 			q->rx_buf_size = vport->bufq_size[j];
 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
 			q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
-			if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
-						IDPF_CAP_HSPLIT) &&
-			    idpf_is_queue_model_split(vport->rxq_model)) {
+			if (hs) {
 				q->rx_hsplit_en = true;
 				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
 			}
@@ -1344,9 +1346,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
 			rx_qgrp->splitq.rxq_sets[j]->refillq1 =
 				&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
 
-			if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
-						IDPF_CAP_HSPLIT) &&
-			    idpf_is_queue_model_split(vport->rxq_model)) {
+			if (hs) {
 				q->rx_hsplit_en = true;
 				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
 			}
@@ -3285,6 +3285,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
 	memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
 	memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
 
+	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
+
 	idpf_vport_init_num_qs(vport, vport_msg);
 	idpf_vport_calc_num_q_desc(vport);
 	idpf_vport_calc_num_q_groups(vport);
@@ -95,6 +95,7 @@ struct kernel_ethtool_ringparam {
  * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
  * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
  * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
  */
 enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
@@ -102,6 +103,7 @@ enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_TX_PUSH = BIT(2),
 	ETHTOOL_RING_USE_RX_PUSH = BIT(3),
 	ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+	ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
 };
 
 #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -124,6 +124,8 @@ const struct nla_policy ethnl_rings_set_policy[] = {
 	[ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
 	[ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
 	[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
+	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] =
+		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
 	[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
 	[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
 	[ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
@@ -145,6 +147,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
 		return -EOPNOTSUPP;
 	}
 
+	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
+				    "setting TCP data split is not supported");
+		return -EOPNOTSUPP;
+	}
+
 	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
 	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
 		NL_SET_ERR_MSG_ATTR(info->extack,
@@ -202,6 +212,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
 	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
 	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
 			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
+	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
+			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
 	ethnl_update_u32(&kernel_ringparam.cqe_size,
 			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
 	ethnl_update_u8(&kernel_ringparam.tx_push,
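
For another driver wanting to opt in to the new knob, the wiring
mirrors the idpf changes above: advertise the capability bit and
honour kernel_ethtool_ringparam::tcp_data_split in the ring callbacks.
A minimal sketch, not part of this commit; the foo_* names are
hypothetical:

	#include <linux/ethtool.h>

	static void foo_get_ringparam(struct net_device *netdev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kring,
				      struct netlink_ext_ack *extack)
	{
		/* report the current state, or _UNKNOWN if unsupported */
		kring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	}

	static int foo_set_ringparam(struct net_device *netdev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kring,
				     struct netlink_ext_ack *extack)
	{
		/* apply kring->tcp_data_split (unknown/disabled/enabled) */
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		/* without this bit, ethnl_set_rings_validate() rejects the attr */
		.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
		.get_ringparam		= foo_get_ringparam,
		.set_ringparam		= foo_set_ringparam,
	};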