Commit 33377152 authored by David S. Miller

Merge branch 'qed-Enhancements'

Manish Chopra says:

====================
qed*: Enhancements

This patch series adds the following support in the drivers:

1. Egress mqprio offload.
2. Add destination IP based flow profile.
3. Ingress flower offload (for drop action).

Please consider applying this series to "net-next".
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2eee32a7 2ce9c93e
@@ -2188,16 +2188,17 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                                  struct qed_dev_eth_info *info)
 {
+    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
     int i;

     memset(info, 0, sizeof(*info));

-    info->num_tc = 1;
-
     if (IS_PF(cdev)) {
         int max_vf_vlan_filters = 0;
         int max_vf_mac_filters = 0;

+        info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
         if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
             u16 num_queues = 0;
@@ -2248,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
     } else {
         u16 total_cids = 0;

+        info->num_tc = 1;
+
         /* Determine queues & XDP support */
         for_each_hwfn(cdev, i) {
             struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2554,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev,
     rc = qed_eth_tx_queue_start(p_hwfn,
                                 p_hwfn->hw_info.opaque_fid,
-                                p_params, 0,
+                                p_params, p_params->tc,
                                 pbl_addr, pbl_size, ret_params);

     if (rc) {
...
@@ -948,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
         params->eth_pf_params.num_arfs_filters = 0;

     /* In case we might support RDMA, don't allow qede to be greedy
-     * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+     * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+     * per hwfn.
      */
     if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
         u16 *num_cons;

         num_cons = &params->eth_pf_params.num_cons;
-        *num_cons = min_t(u16, *num_cons, 192);
+        *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
     }

     for (i = 0; i < cdev->num_hwfns; i++) {
...
@@ -52,6 +52,9 @@
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_eth_if.h>

+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
 #define QEDE_MAJOR_VERSION 8
 #define QEDE_MINOR_VERSION 33
 #define QEDE_REVISION_VERSION 0
@@ -386,6 +389,15 @@ struct qede_tx_queue {
 #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
                                         QEDE_MAX_TSS_CNT(edev))
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+                                              ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+                                             (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+    (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+    [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))

 /* Regular Tx requires skb + metadata for release purpose,
  * while XDP requires the pages and the mapped address.
@@ -399,6 +411,8 @@ struct qede_tx_queue {

     /* Slowpath; Should be kept in end [unless missing padding] */
     void *handle;
+    u16 cos;
+    u16 ndev_txq_id;
 };

 #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -458,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
 void qede_free_arfs(struct qede_dev *edev);
 int qede_alloc_arfs(struct qede_dev *edev);
 int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
 int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                           u32 *rule_locs);
@@ -524,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
 int qede_txq_has_work(struct qede_tx_queue *txq);
 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+                            struct tc_cls_flower_offload *f);

 #define RX_RING_SIZE_POW 13
 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -541,5 +557,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 #define QEDE_RX_HDR_SIZE 256
 #define QEDE_MAX_JUMBO_PACKET_SIZE 9600
 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+    for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)

 #endif /* _QEDE_H_ */
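The block of macros added above encodes a fixed layout for the ndev Tx queue ids: TC0's queues come first, then TC1's, each block QEDE_TSS_COUNT(edev) wide, while the fastpath array keeps its Rx entries first. A minimal standalone sketch of that arithmetic (the constants 8/8 below are hypothetical stand-ins for QEDE_TSS_COUNT(edev) and fp_num_rx, not values from the patch):

/* Standalone sketch of the ndev <-> (cos, index) queue mapping.
 * Build with: cc -o map map.c && ./map
 */
#include <stdio.h>

#define TSS_COUNT 8   /* stands in for QEDE_TSS_COUNT(edev) */
#define FP_NUM_RX 8   /* stands in for (edev)->fp_num_rx */
#define NUM_TC    4   /* stands in for (edev)->dev_info.num_tc */

int main(void)
{
    unsigned int ndev_txq_id;

    for (ndev_txq_id = 0; ndev_txq_id < TSS_COUNT * NUM_TC; ndev_txq_id++) {
        unsigned int cos = ndev_txq_id / TSS_COUNT; /* QEDE_NDEV_TXQ_ID_TO_TXQ_COS */
        unsigned int idx = ndev_txq_id % TSS_COUNT; /* index within the TC block */
        unsigned int fp  = FP_NUM_RX + idx;         /* QEDE_NDEV_TXQ_ID_TO_FP_ID */

        /* QEDE_TXQ_TO_NDEV_TXQ_ID reverses the mapping */
        if (cos * TSS_COUNT + idx != ndev_txq_id)
            printf("round-trip failed for %u\n", ndev_txq_id);
        else
            printf("ndev txq %2u -> fp %2u, cos %u\n", ndev_txq_id, fp, cos);
    }
    return 0;
}

qede_start_xmit() relies on exactly this round trip: skb_get_queue_mapping() returns the ndev id, and QEDE_NDEV_TXQ_ID_TO_TXQ() recovers the per-CoS ring.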
@@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
                 QEDE_TXQ_XDP_TO_IDX(edev, txq),
                 qede_tqstats_arr[i].string);
         else
-            sprintf(*buf, "%d: %s", txq->index,
+            sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
                 qede_tqstats_arr[i].string);
         *buf += ETH_GSTRING_LEN;
     }
@@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
         if (fp->type & QEDE_FASTPATH_XDP)
             qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

-        if (fp->type & QEDE_FASTPATH_TX)
-            qede_get_strings_stats_txq(edev, fp->txq, &buf);
+        if (fp->type & QEDE_FASTPATH_TX) {
+            int cos;
+
+            for_each_cos_in_txq(edev, cos)
+                qede_get_strings_stats_txq(edev,
+                                           &fp->txq[cos], &buf);
+        }
     }

     /* Account for non-queue statistics */
@@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev,
         if (fp->type & QEDE_FASTPATH_XDP)
             qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

-        if (fp->type & QEDE_FASTPATH_TX)
-            qede_get_ethtool_stats_txq(fp->txq, &buf);
+        if (fp->type & QEDE_FASTPATH_TX) {
+            int cos;
+
+            for_each_cos_in_txq(edev, cos)
+                qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+        }
     }

     for (i = 0; i < QEDE_NUM_STATS; i++) {
@@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
             num_stats--;

         /* Account for the Regular Tx statistics */
-        num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+        num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+                     edev->dev_info.num_tc;

         /* Account for the Regular Rx statistics */
         num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
@@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev,
     }

     for_each_queue(i) {
+        struct qede_tx_queue *txq;
+
         fp = &edev->fp_array[i];
+
+        /* All TX queues of a given fastpath use the same
+         * coalescing value, so there is no need to iterate
+         * over all TCs; the TC0 txq suffices.
+         */
         if (fp->type & QEDE_FASTPATH_TX) {
-            tx_handle = fp->txq->handle;
+            txq = QEDE_FP_TC0_TXQ(fp);
+            tx_handle = txq->handle;
             break;
         }
     }
@@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev,
         }

         if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+            struct qede_tx_queue *txq;
+
+            /* All TX queues of a given fastpath use the same
+             * coalescing value, so there is no need to iterate
+             * over all TCs; the TC0 txq suffices.
+             */
+            txq = QEDE_FP_TC0_TXQ(fp);
+
             rc = edev->ops->common->set_coalesce(edev->cdev,
                                                  0, txc,
-                                                 fp->txq->handle);
+                                                 txq->handle);
             if (rc) {
                 DP_INFO(edev,
                         "Set TX coalesce error, rc = %d\n", rc);
@@ -1259,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
         rc = qede_add_cls_rule(edev, info);
         break;
     case ETHTOOL_SRXCLSRLDEL:
-        rc = qede_del_cls_rule(edev, info);
+        rc = qede_delete_flow_filter(edev, info->fs.location);
         break;
     default:
         DP_INFO(edev, "Command parameters not supported\n");
@@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
     u16 val;

     for_each_queue(i) {
-        if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
-            txq = edev->fp_array[i].txq;
+        struct qede_fastpath *fp = &edev->fp_array[i];
+
+        if (fp->type & QEDE_FASTPATH_TX) {
+            txq = QEDE_FP_TC0_TXQ(fp);
             break;
         }
     }
...
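With per-CoS Tx rings, the queue stat strings gain a CoS suffix and qede_get_sset_count() multiplies the Tx stat count by the number of TCs. A small sketch of the resulting naming and count (the stat name and all counts below are hypothetical):

#include <stdio.h>

int main(void)
{
    unsigned int tss_count = 4, num_tc = 4, num_tqstats = 10;
    char buf[32];

    /* naming used by qede_get_strings_stats_txq() for non-XDP queues */
    snprintf(buf, sizeof(buf), "%d_%d: %s", 2, 1, "xmit_pkts");
    printf("%s\n", buf);    /* -> "2_1: xmit_pkts" (txq index 2, cos 1) */

    /* Tx portion of the sset count now scales with num_tc */
    printf("tx stats = %u\n", tss_count * num_tqstats * num_tc);
    return 0;
}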
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
     struct qede_arfs_tuple tuple;

     u32 flow_id;
-    u16 sw_id;
+    u64 sw_id;
     u16 rxq_id;
     u16 next_rxq_id;
     u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
         n->tuple.stringify(&n->tuple, tuple_buffer);
         DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
-               "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+               "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
                add_fltr ? "Adding" : "Deleting",
                n->sw_id, tuple_buffer, n->vfid, rxq_id);
     }
@@ -152,7 +152,10 @@ static void
 qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
 {
     kfree(fltr->data);
-    clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+    if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+        clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
     kfree(fltr);
 }
@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
     if (fw_rc) {
         DP_NOTICE(edev,
-              "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+              "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
               fw_rc, fltr->flow_id, fltr->sw_id,
               ntohs(fltr->tuple.src_port),
               ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1348,7 +1351,7 @@ void qede_config_rx_mode(struct net_device *ndev)
 }

 static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
 {
     struct qede_arfs_fltr_node *fltr;
@@ -1599,6 +1602,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
     return 0;
 }

+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+                                        struct qede_arfs_tuple *t)
+{
+    /* We must have only a 4-tuple, L4 port, src IP, or dst IP
+     * as an input.
+     */
+    if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+        t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+    } else if (!t->src_port && t->dst_port &&
+               !t->src_ipv4 && !t->dst_ipv4) {
+        t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+    } else if (!t->src_port && !t->dst_port &&
+               !t->dst_ipv4 && t->src_ipv4) {
+        t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+    } else if (!t->src_port && !t->dst_port &&
+               t->dst_ipv4 && !t->src_ipv4) {
+        t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+    } else {
+        DP_INFO(edev, "Invalid N-tuple\n");
+        return -EOPNOTSUPP;
+    }
+
+    t->ip_comp = qede_flow_spec_ipv4_cmp;
+    t->build_hdr = qede_flow_build_ipv4_hdr;
+    t->stringify = qede_flow_stringify_ipv4_hdr;
+
+    return 0;
+}
+
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+                                        struct qede_arfs_tuple *t,
+                                        struct in6_addr *zaddr)
+{
+    /* We must have only a 4-tuple, L4 port, src IP, or dst IP
+     * as an input.
+     */
+    if (t->src_port && t->dst_port &&
+        memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+        memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+        t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+    } else if (!t->src_port && t->dst_port &&
+               !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+               !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+        t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+    } else if (!t->src_port && !t->dst_port &&
+               !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+               memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+        t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+    } else if (!t->src_port && !t->dst_port &&
+               memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+               !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+        t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+    } else {
+        DP_INFO(edev, "Invalid N-tuple\n");
+        return -EOPNOTSUPP;
+    }
+
+    t->ip_comp = qede_flow_spec_ipv6_cmp;
+    t->build_hdr = qede_flow_build_ipv6_hdr;
+
+    return 0;
+}
+
 static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
                                                struct qede_arfs_tuple *t,
                                                struct ethtool_rx_flow_spec *fs)
@@ -1638,27 +1704,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
     t->src_port = fs->h_u.tcp_ip4_spec.psrc;
     t->dst_port = fs->h_u.tcp_ip4_spec.pdst;

-    /* We must either have a valid 4-tuple or only dst port
-     * or only src ip as an input
-     */
-    if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
-        t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
-    } else if (!t->src_port && t->dst_port &&
-               !t->src_ipv4 && !t->dst_ipv4) {
-        t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
-    } else if (!t->src_port && !t->dst_port &&
-               !t->dst_ipv4 && t->src_ipv4) {
-        t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
-    } else {
-        DP_INFO(edev, "Invalid N-tuple\n");
-        return -EOPNOTSUPP;
-    }
-
-    t->ip_comp = qede_flow_spec_ipv4_cmp;
-    t->build_hdr = qede_flow_build_ipv4_hdr;
-    t->stringify = qede_flow_stringify_ipv4_hdr;
-
-    return 0;
+    return qede_set_v4_tuple_to_profile(edev, t);
 }

 static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
@@ -1690,10 +1736,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
                                                struct ethtool_rx_flow_spec *fs)
 {
     struct in6_addr zero_addr;
-    void *p;

-    p = &zero_addr;
-    memset(p, 0, sizeof(zero_addr));
+    memset(&zero_addr, 0, sizeof(zero_addr));

     if ((fs->h_u.tcp_ip6_spec.psrc &
          fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
@@ -1720,30 +1764,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
     t->src_port = fs->h_u.tcp_ip6_spec.psrc;
     t->dst_port = fs->h_u.tcp_ip6_spec.pdst;

-    /* We must make sure we have a valid 4-tuple or only dest port
-     * or only src ip as an input
-     */
-    if (t->src_port && t->dst_port &&
-        memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
-        memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
-        t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
-    } else if (!t->src_port && t->dst_port &&
-               !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
-               !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
-        t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
-    } else if (!t->src_port && !t->dst_port &&
-               !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
-               memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
-        t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
-    } else {
-        DP_INFO(edev, "Invalid N-tuple\n");
-        return -EOPNOTSUPP;
-    }
-
-    t->ip_comp = qede_flow_spec_ipv6_cmp;
-    t->build_hdr = qede_flow_build_ipv6_hdr;
-
-    return 0;
+    return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
 }

 static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
@@ -1941,9 +1962,8 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
     return rc;
 }

-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
 {
-    struct ethtool_rx_flow_spec *fsp = &info->fs;
     struct qede_arfs_fltr_node *fltr = NULL;
     int rc = -EPERM;

@@ -1952,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
         goto unlock;

     fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
-                                     fsp->location);
+                                     cookie);
     if (!fltr)
         goto unlock;
@@ -1982,3 +2002,293 @@ int qede_get_arfs_filter_count(struct qede_dev *edev)
     __qede_unlock(edev);
     return count;
 }
+
+static int qede_parse_actions(struct qede_dev *edev,
+                              struct tcf_exts *exts)
+{
+    int rc = -EINVAL, num_act = 0;
+    const struct tc_action *a;
+    bool is_drop = false;
+    LIST_HEAD(actions);
+
+    if (!tcf_exts_has_actions(exts)) {
+        DP_NOTICE(edev, "No tc actions received\n");
+        return rc;
+    }
+
+    tcf_exts_to_list(exts, &actions);
+    list_for_each_entry(a, &actions, list) {
+        num_act++;
+
+        if (is_tcf_gact_shot(a))
+            is_drop = true;
+    }
+
+    if (num_act == 1 && is_drop)
+        return 0;
+
+    return rc;
+}
+
+static int
+qede_tc_parse_ports(struct qede_dev *edev,
+                    struct tc_cls_flower_offload *f,
+                    struct qede_arfs_tuple *t)
+{
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+        struct flow_dissector_key_ports *key, *mask;
+
+        key = skb_flow_dissector_target(f->dissector,
+                                        FLOW_DISSECTOR_KEY_PORTS,
+                                        f->key);
+        mask = skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_PORTS,
+                                         f->mask);
+
+        if ((key->src && mask->src != U16_MAX) ||
+            (key->dst && mask->dst != U16_MAX)) {
+            DP_NOTICE(edev, "Do not support ports masks\n");
+            return -EINVAL;
+        }
+
+        t->src_port = key->src;
+        t->dst_port = key->dst;
+    }
+
+    return 0;
+}
+
+static int
+qede_tc_parse_v6_common(struct qede_dev *edev,
+                        struct tc_cls_flower_offload *f,
+                        struct qede_arfs_tuple *t)
+{
+    struct in6_addr zero_addr, addr;
+
+    memset(&zero_addr, 0, sizeof(addr));
+    memset(&addr, 0xff, sizeof(addr));
+
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+        struct flow_dissector_key_ipv6_addrs *key, *mask;
+
+        key = skb_flow_dissector_target(f->dissector,
+                                        FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                        f->key);
+        mask = skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                         f->mask);
+
+        if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
+             memcmp(&mask->src, &addr, sizeof(addr))) ||
+            (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
+             memcmp(&mask->dst, &addr, sizeof(addr)))) {
+            DP_NOTICE(edev,
+                      "Do not support IPv6 address prefix/mask\n");
+            return -EINVAL;
+        }
+
+        memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+        memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+    }
+
+    if (qede_tc_parse_ports(edev, f, t))
+        return -EINVAL;
+
+    return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+                        struct tc_cls_flower_offload *f,
+                        struct qede_arfs_tuple *t)
+{
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+        struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+        key = skb_flow_dissector_target(f->dissector,
+                                        FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                        f->key);
+        mask = skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                         f->mask);
+
+        if ((key->src && mask->src != U32_MAX) ||
+            (key->dst && mask->dst != U32_MAX)) {
+            DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+            return -EINVAL;
+        }
+
+        t->src_ipv4 = key->src;
+        t->dst_ipv4 = key->dst;
+    }
+
+    if (qede_tc_parse_ports(edev, f, t))
+        return -EINVAL;
+
+    return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+                     struct tc_cls_flower_offload *f,
+                     struct qede_arfs_tuple *tuple)
+{
+    tuple->ip_proto = IPPROTO_TCP;
+    tuple->eth_proto = htons(ETH_P_IPV6);
+
+    return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+                     struct tc_cls_flower_offload *f,
+                     struct qede_arfs_tuple *tuple)
+{
+    tuple->ip_proto = IPPROTO_TCP;
+    tuple->eth_proto = htons(ETH_P_IP);
+
+    return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+                     struct tc_cls_flower_offload *f,
+                     struct qede_arfs_tuple *tuple)
+{
+    tuple->ip_proto = IPPROTO_UDP;
+    tuple->eth_proto = htons(ETH_P_IPV6);
+
+    return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+                     struct tc_cls_flower_offload *f,
+                     struct qede_arfs_tuple *tuple)
+{
+    tuple->ip_proto = IPPROTO_UDP;
+    tuple->eth_proto = htons(ETH_P_IP);
+
+    return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+                       struct tc_cls_flower_offload *f,
+                       struct qede_arfs_tuple *tuple)
+{
+    int rc = -EINVAL;
+    u8 ip_proto = 0;
+
+    memset(tuple, 0, sizeof(*tuple));
+
+    if (f->dissector->used_keys &
+        ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+          BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+          BIT(FLOW_DISSECTOR_KEY_BASIC) |
+          BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+          BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+        DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+                  f->dissector->used_keys);
+        return -EOPNOTSUPP;
+    }
+
+    if (proto != htons(ETH_P_IP) &&
+        proto != htons(ETH_P_IPV6)) {
+        DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+        return -EPROTONOSUPPORT;
+    }
+
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+        struct flow_dissector_key_basic *key;
+
+        key = skb_flow_dissector_target(f->dissector,
+                                        FLOW_DISSECTOR_KEY_BASIC,
+                                        f->key);
+        ip_proto = key->ip_proto;
+    }
+
+    if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+        rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+    else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+        rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+    else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+        rc = qede_tc_parse_udp_v4(edev, f, tuple);
+    else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+        rc = qede_tc_parse_udp_v6(edev, f, tuple);
+    else
+        DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+    return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+                            struct tc_cls_flower_offload *f)
+{
+    struct qede_arfs_fltr_node *n;
+    int min_hlen, rc = -EINVAL;
+    struct qede_arfs_tuple t;
+
+    __qede_lock(edev);
+
+    if (!edev->arfs) {
+        rc = -EPERM;
+        goto unlock;
+    }
+
+    /* parse flower attribute and prepare filter */
+    if (qede_parse_flower_attr(edev, proto, f, &t))
+        goto unlock;
+
+    /* Validate profile mode and number of filters */
+    if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+        edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+        DP_NOTICE(edev,
+                  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+                  t.mode, edev->arfs->mode, edev->arfs->filter_count);
+        goto unlock;
+    }
+
+    /* parse tc actions and get the vf_id */
+    if (qede_parse_actions(edev, f->exts))
+        goto unlock;
+
+    if (qede_flow_find_fltr(edev, &t)) {
+        rc = -EEXIST;
+        goto unlock;
+    }
+
+    n = kzalloc(sizeof(*n), GFP_KERNEL);
+    if (!n) {
+        rc = -ENOMEM;
+        goto unlock;
+    }
+
+    min_hlen = qede_flow_get_min_header_size(&t);
+
+    n->data = kzalloc(min_hlen, GFP_KERNEL);
+    if (!n->data) {
+        kfree(n);
+        rc = -ENOMEM;
+        goto unlock;
+    }
+
+    memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+    n->buf_len = min_hlen;
+    n->b_is_drop = true;
+    n->sw_id = f->cookie;
+    n->tuple.build_hdr(&n->tuple, n->data);
+
+    rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+    if (rc)
+        goto unlock;
+
+    qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+    rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+    __qede_unlock(edev);
+    return rc;
+}
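qede_set_v4_tuple_to_profile() and its v6 twin accept only all-or-nothing field combinations, with the new IP_DEST mode covering the dst-IP-only case that motivates this series. A compilable sketch of the v4 decision table, with plain ints standing in for the tuple fields:

#include <stdio.h>

/* Mode names mirror the QED_FILTER_CONFIG_MODE_* values in the patch. */
static const char *v4_mode(unsigned int sport, unsigned int dport,
                           unsigned int sip, unsigned int dip)
{
    if (sport && dport && sip && dip)
        return "5_TUPLE";
    if (!sport && dport && !sip && !dip)
        return "L4_PORT";
    if (!sport && !dport && !dip && sip)
        return "IP_SRC";
    if (!sport && !dport && dip && !sip)
        return "IP_DEST";
    return "invalid N-tuple (-EOPNOTSUPP)";
}

int main(void)
{
    printf("%s\n", v4_mode(0, 443, 0, 0));                       /* L4_PORT */
    printf("%s\n", v4_mode(0, 0, 0x0a000001, 0));                /* IP_SRC  */
    printf("%s\n", v4_mode(0, 0, 0, 0x0a000002));                /* IP_DEST */
    printf("%s\n", v4_mode(1024, 443, 0x0a000001, 0x0a000002));  /* 5_TUPLE */
    printf("%s\n", v4_mode(1024, 0, 0, 0));                      /* invalid */
    return 0;
}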
@@ -408,12 +408,12 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)

 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
+    unsigned int pkts_compl = 0, bytes_compl = 0;
     struct netdev_queue *netdev_txq;
     u16 hw_bd_cons;
-    unsigned int pkts_compl = 0, bytes_compl = 0;
     int rc;

-    netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+    netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

     hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
     barrier();
@@ -1365,9 +1365,14 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp)
         if (qede_txq_has_work(fp->xdp_tx))
             return true;

-    if (likely(fp->type & QEDE_FASTPATH_TX))
-        if (qede_txq_has_work(fp->txq))
-            return true;
+    if (likely(fp->type & QEDE_FASTPATH_TX)) {
+        int cos;
+
+        for_each_cos_in_txq(fp->edev, cos) {
+            if (qede_txq_has_work(&fp->txq[cos]))
+                return true;
+        }
+    }

     return false;
 }
@@ -1382,8 +1387,14 @@ int qede_poll(struct napi_struct *napi, int budget)
     struct qede_dev *edev = fp->edev;
     int rx_work_done = 0;

-    if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
-        qede_tx_int(edev, fp->txq);
+    if (likely(fp->type & QEDE_FASTPATH_TX)) {
+        int cos;
+
+        for_each_cos_in_txq(fp->edev, cos) {
+            if (qede_txq_has_work(&fp->txq[cos]))
+                qede_tx_int(edev, &fp->txq[cos]);
+        }
+    }

     if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
         qede_xdp_tx_int(edev, fp->xdp_tx);
@@ -1444,8 +1455,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)

     /* Get tx-queue context and netdev index */
     txq_index = skb_get_queue_mapping(skb);
-    WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
-    txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+    WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+    txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
     netdev_txq = netdev_get_tx_queue(ndev, txq_index);

     WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
...
@@ -536,6 +536,97 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
     return 0;
 }

+int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+    struct qede_dev *edev = netdev_priv(ndev);
+    int cos, count, offset;
+
+    if (num_tc > edev->dev_info.num_tc)
+        return -EINVAL;
+
+    netdev_reset_tc(ndev);
+    netdev_set_num_tc(ndev, num_tc);
+
+    for_each_cos_in_txq(edev, cos) {
+        count = QEDE_TSS_COUNT(edev);
+        offset = cos * QEDE_TSS_COUNT(edev);
+        netdev_set_tc_queue(ndev, cos, count, offset);
+    }
+
+    return 0;
+}
+
+static int
+qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+                __be16 proto)
+{
+    switch (f->command) {
+    case TC_CLSFLOWER_REPLACE:
+        return qede_add_tc_flower_fltr(edev, proto, f);
+    case TC_CLSFLOWER_DESTROY:
+        return qede_delete_flow_filter(edev, f->cookie);
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
+static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+                                  void *cb_priv)
+{
+    struct tc_cls_flower_offload *f;
+    struct qede_dev *edev = cb_priv;
+
+    if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
+        return -EOPNOTSUPP;
+
+    switch (type) {
+    case TC_SETUP_CLSFLOWER:
+        f = type_data;
+        return qede_set_flower(edev, f, f->common.protocol);
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
+static int qede_setup_tc_block(struct qede_dev *edev,
+                               struct tc_block_offload *f)
+{
+    if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+        return -EOPNOTSUPP;
+
+    switch (f->command) {
+    case TC_BLOCK_BIND:
+        return tcf_block_cb_register(f->block,
+                                     qede_setup_tc_block_cb,
+                                     edev, edev, f->extack);
+    case TC_BLOCK_UNBIND:
+        tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
+        return 0;
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+                      void *type_data)
+{
+    struct qede_dev *edev = netdev_priv(dev);
+    struct tc_mqprio_qopt *mqprio;
+
+    switch (type) {
+    case TC_SETUP_BLOCK:
+        return qede_setup_tc_block(edev, type_data);
+    case TC_SETUP_QDISC_MQPRIO:
+        mqprio = type_data;
+
+        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+        return qede_setup_tc(dev, mqprio->num_tc);
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
 static const struct net_device_ops qede_netdev_ops = {
     .ndo_open = qede_open,
     .ndo_stop = qede_close,
@@ -568,6 +659,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
     .ndo_rx_flow_steer = qede_rx_flow_steer,
 #endif
+    .ndo_setup_tc = qede_setup_tc_offload,
 };

 static const struct net_device_ops qede_netdev_vf_ops = {
@@ -621,7 +713,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
     struct qede_dev *edev;

     ndev = alloc_etherdev_mqs(sizeof(*edev),
-                              info->num_queues, info->num_queues);
+                              info->num_queues * info->num_tc,
+                              info->num_queues);
     if (!ndev) {
         pr_err("etherdev allocation failed\n");
         return NULL;
@@ -688,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev)
     /* user-changeble features */
     hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
                   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                  NETIF_F_TSO | NETIF_F_TSO6;
+                  NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

     if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
         hw_features |= NETIF_F_NTUPLE;
@@ -830,7 +923,8 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
         }

         if (fp->type & QEDE_FASTPATH_TX) {
-            fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+            fp->txq = kcalloc(edev->dev_info.num_tc,
+                              sizeof(*fp->txq), GFP_KERNEL);
             if (!fp->txq)
                 goto err;
         }
@@ -879,10 +973,15 @@ static void qede_sp_task(struct work_struct *work)
 static void qede_update_pf_params(struct qed_dev *cdev)
 {
     struct qed_pf_params pf_params;
+    u16 num_cons;

     /* 64 rx + 64 tx + 64 XDP */
     memset(&pf_params, 0, sizeof(struct qed_pf_params));
-    pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+    /* 1 rx + 1 xdp + max tx cos */
+    num_cons = QED_MIN_L2_CONS;
+
+    pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

     /* Same for VFs - make sure they'll have sufficient connections
      * to support XDP Tx queues.
@@ -1363,8 +1462,12 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
     if (fp->type & QEDE_FASTPATH_XDP)
         qede_free_mem_txq(edev, fp->xdp_tx);

-    if (fp->type & QEDE_FASTPATH_TX)
-        qede_free_mem_txq(edev, fp->txq);
+    if (fp->type & QEDE_FASTPATH_TX) {
+        int cos;
+
+        for_each_cos_in_txq(edev, cos)
+            qede_free_mem_txq(edev, &fp->txq[cos]);
+    }
 }

 /* This function allocates all memory needed for a single fp (i.e. an entity
@@ -1391,9 +1494,13 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
     }

     if (fp->type & QEDE_FASTPATH_TX) {
-        rc = qede_alloc_mem_txq(edev, fp->txq);
-        if (rc)
-            goto out;
+        int cos;
+
+        for_each_cos_in_txq(edev, cos) {
+            rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+            if (rc)
+                goto out;
+        }
     }

 out:
@@ -1466,10 +1573,23 @@ static void qede_init_fp(struct qede_dev *edev)
         }

         if (fp->type & QEDE_FASTPATH_TX) {
-            fp->txq->index = txq_index++;
-            if (edev->dev_info.is_legacy)
-                fp->txq->is_legacy = 1;
-            fp->txq->dev = &edev->pdev->dev;
+            int cos;
+
+            for_each_cos_in_txq(edev, cos) {
+                struct qede_tx_queue *txq = &fp->txq[cos];
+                u16 ndev_tx_id;
+
+                txq->cos = cos;
+                txq->index = txq_index;
+                ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+                txq->ndev_txq_id = ndev_tx_id;
+
+                if (edev->dev_info.is_legacy)
+                    txq->is_legacy = 1;
+
+                txq->dev = &edev->pdev->dev;
+            }
+
+            txq_index++;
         }

         snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1483,7 +1603,9 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
 {
     int rc = 0;

-    rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
+    rc = netif_set_real_num_tx_queues(edev->ndev,
+                                      QEDE_TSS_COUNT(edev) *
+                                      edev->dev_info.num_tc);
     if (rc) {
         DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
         return rc;
@@ -1685,9 +1807,13 @@ static int qede_stop_queues(struct qede_dev *edev)
         fp = &edev->fp_array[i];

         if (fp->type & QEDE_FASTPATH_TX) {
-            rc = qede_drain_txq(edev, fp->txq, true);
-            if (rc)
-                return rc;
+            int cos;
+
+            for_each_cos_in_txq(edev, cos) {
+                rc = qede_drain_txq(edev, &fp->txq[cos], true);
+                if (rc)
+                    return rc;
+            }
         }

         if (fp->type & QEDE_FASTPATH_XDP) {
@@ -1703,9 +1829,13 @@ static int qede_stop_queues(struct qede_dev *edev)

         /* Stop the Tx Queue(s) */
         if (fp->type & QEDE_FASTPATH_TX) {
-            rc = qede_stop_txq(edev, fp->txq, i);
-            if (rc)
-                return rc;
+            int cos;
+
+            for_each_cos_in_txq(edev, cos) {
+                rc = qede_stop_txq(edev, &fp->txq[cos], i);
+                if (rc)
+                    return rc;
+            }
         }

         /* Stop the Rx Queue */
@@ -1758,6 +1888,7 @@ static int qede_start_txq(struct qede_dev *edev,
     params.p_sb = fp->sb_info;
     params.sb_idx = sb_idx;
+    params.tc = txq->cos;

     rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
                                page_cnt, &ret_params);
@@ -1877,9 +2008,14 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
         }

         if (fp->type & QEDE_FASTPATH_TX) {
-            rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
-            if (rc)
-                goto out;
+            int cos;
+
+            for_each_cos_in_txq(edev, cos) {
+                rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+                                    TX_PI(cos));
+                if (rc)
+                    goto out;
+            }
         }
     }
@@ -1973,6 +2109,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
                      bool is_locked)
 {
     struct qed_link_params link_params;
+    u8 num_tc;
     int rc;

     DP_INFO(edev, "Starting qede load\n");
@@ -2019,6 +2156,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
         goto err4;
     DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

+    num_tc = netdev_get_num_tc(edev->ndev);
+    num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+    qede_setup_tc(edev->ndev, num_tc);
+
     /* Program un-configured VLANs */
     qede_configure_vlan_filters(edev);
@@ -2143,7 +2284,7 @@ static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
     struct netdev_queue *netdev_txq;

-    netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+    netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
     if (netif_xmit_stopped(netdev_txq))
         return true;
@@ -2208,9 +2349,11 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
     for_each_queue(i) {
         fp = &edev->fp_array[i];
         if (fp->type & QEDE_FASTPATH_TX) {
-            if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+            struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+            if (txq->sw_tx_cons != txq->sw_tx_prod)
                 etlv->txqs_empty = false;
-            if (qede_is_txq_full(edev, fp->txq))
+            if (qede_is_txq_full(edev, txq))
                 etlv->num_txqs_full++;
         }
         if (fp->type & QEDE_FASTPATH_RX) {
...
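qede_setup_tc() advertises one contiguous block of QEDE_TSS_COUNT(edev) queues per traffic class through netdev_set_tc_queue(), matching the ndev queue layout defined in qede.h. A sketch of the resulting layout, assuming 4 TSS queues and 4 TCs (hypothetical values):

#include <stdio.h>

int main(void)
{
    unsigned int tss_count = 4;  /* QEDE_TSS_COUNT(edev) */
    unsigned int num_tc = 4;     /* edev->dev_info.num_tc */
    unsigned int cos;

    /* same (count, offset) pairs qede_setup_tc() would program */
    for (cos = 0; cos < num_tc; cos++)
        printf("tc %u -> queues [%u..%u]\n", cos,
               cos * tss_count, cos * tss_count + tss_count - 1);
    return 0;
}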
@@ -39,6 +39,10 @@
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_iov_if.h>

+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
 struct qed_queue_start_common_params {
     /* Should always be relative to entity sending this. */
     u8 vport_id;
@@ -49,6 +53,8 @@ struct qed_queue_start_common_params {

     struct qed_sb_info *p_sb;
     u8 sb_idx;
+
+    u8 tc;
 };

 struct qed_rxq_start_ret_params {
...
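The new bounds replace the bare multiplier 3 and the clamp 192 used before. Assuming NUM_PHYS_TCS_4PORT_K2 is 4 (the "4 tx-cos" in the comment), each queue set needs 1 rx + 1 xdp + 4 tx-cos = 6 connections, so 64 queue sets cap at 384 L2 connections. A sketch of the arithmetic:

#include <stdio.h>

#define NUM_PHYS_TCS_4PORT_K2 4  /* assumed value, per the comment above */
#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))

int main(void)
{
    /* 1 rx + 1 xdp + 4 tx-cos = 6 cons per queue set; 64 * 6 = 384 */
    printf("QED_MIN_L2_CONS=%d QED_MAX_L2_CONS=%d\n",
           QED_MIN_L2_CONS, QED_MAX_L2_CONS);
    return 0;
}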