Commit 2b04a661 authored by David S. Miller

Merge branch 'cxgb4-add-mirror-action-support-for-TC-MATCHALL'

Rahul Lakkireddy says:

====================
cxgb4: add mirror action support for TC-MATCHALL

This series of patches adds support for mirroring all ingress traffic
via the TC-MATCHALL ingress offload.

Patch 1 adds support to dynamically create a mirror Virtual Interface
(VI) that accepts all mirrored ingress traffic when the mirror action
is set in the TC-MATCHALL offload.
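
A minimal sketch of the resulting lifetime, assuming a TC-MATCHALL
mirror rule as the caller (the example_* helpers are hypothetical;
only cxgb4_port_mirror_alloc()/cxgb4_port_mirror_free() come from
this series):

        /* The first mirror rule on a port creates the mirror VI (and,
         * once the port is up, its mirror Rxqs); later rules only take
         * another reference via pi->vi_mirror_count.
         */
        static int example_add_mirror_rule(struct net_device *dev)
        {
                int ret;

                ret = cxgb4_port_mirror_alloc(dev);
                if (ret)
                        return ret; /* -EOPNOTSUPP if no mirror qsets */

                /* ... install the TC-MATCHALL filter ... */
                return 0;
        }

        /* Deleting the rule drops the reference; the last drop tears
         * down the mirror Rxqs and frees the mirror VI.
         */
        static void example_del_mirror_rule(struct net_device *dev)
        {
                cxgb4_port_mirror_free(dev);
        }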

Patch 2 adds support to allocate the mirror Rxqs and set up RSS for
the mirror VI.
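
How the mirror queues are budgeted and spread, condensed from the
cfg_queues()/enable_msix() and RSS setup changes in the diff below
(a sketch of the policy, not the complete logic):

        /* Mirror qsets follow the normal Ethernet qset scheme when
         * enough qsets are still available; otherwise fall back to
         * 1 qset per port; otherwise disable mirror queue support.
         */
        if (avail_qsets >= s->max_ethqsets)
                s->mirrorqsets = s->max_ethqsets;
        else if (avail_qsets >= adap->params.nports)
                s->mirrorqsets = adap->params.nports;
        else
                s->mirrorqsets = 0;

        /* Each port gets an equal share, and the mirror VI's RSS
         * table spreads ingress traffic round-robin across it.
         */
        pi->nmirrorqsets = s->mirrorqsets / adap->params.nports;
        for (i = 0; i < pi->rss_size; i++)
                rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;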

Patch 3 adds support to replicate all of the main VI's configuration
to the mirror VI. This includes replicating the MTU, promiscuous mode,
all-multicast mode, and the enabled netdev Rx feature offloads.
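
The replication largely reduces to passing the mirror VIID into the
existing Rx-mode helper; abridged from cxgb4_port_mirror_start() in
the diff below:

        /* t4_set_rxmode() now takes both VIIDs and issues a second
         * FW_VI_RXMODE_CMD for the mirror VI, so the mirror VI
         * inherits the main VI's MTU, promisc/allmulti and
         * VLAN-extraction settings.
         */
        ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
                            dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
                            (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);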

v3:
- Replace the mirror VI refcount_t with a plain u32 variable in all
  patches.
- In patch 3, add back the call to cxgb4_port_mirror_start() in
  cxgb_open(), which was present in v1 but was dropped during the v2
  refactoring.

v2:
- Add a mutex to protect all mirror VI data, instead of just the
  mirror Rxqs, in patches 1 and 2.
- Remove the unneeded mirror Rxq mutex in patch 2.
- In patch 3, simplify the replication code by refactoring
  t4_set_rxmode() to handle the mirror VI, instead of duplicating the
  t4_set_rxmode() calls in multiple places.
====================
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 75603a31 696c278f
......@@ -679,6 +679,12 @@ struct port_info {
u8 rx_cchan;
bool tc_block_shared;
/* Mirror VI information */
u16 viid_mirror;
u16 nmirrorqsets;
u32 vi_mirror_count;
struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */
};
struct dentry;
......@@ -705,6 +711,13 @@ enum {
ULP_CRYPTO_KTLS_INLINE = 1 << 3,
};
#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024
#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64
#define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5
#define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8
#define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72
struct rx_sw_desc;
struct sge_fl { /* SGE free-buffer queue state */
......@@ -954,12 +967,15 @@ struct sge {
struct sge_eohw_txq *eohw_txq;
struct sge_ofld_rxq *eohw_rxq;
struct sge_eth_rxq *mirror_rxq[NCHAN];
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 eoqsets; /* # of ETHOFLD queues */
u16 mirrorqsets; /* # of Mirror queues */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
......@@ -1857,6 +1873,8 @@ int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_init_portinfo(struct port_info *pi, int mbox,
int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
u16 *mirror_viid);
void t4_fatal_err(struct adapter *adapter);
unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
......@@ -1966,8 +1984,8 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
unsigned int viid_mirror, int mtu, int promisc, int all_multi,
int bcast, int vlanex, bool sleep_ok);
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
const u8 *addr, const u8 *mask, unsigned int idx,
u8 lookup_type, u8 port_id, bool sleep_ok);
......@@ -2141,6 +2159,8 @@ int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
void cxgb4_port_mirror_free(struct net_device *dev);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
#endif
......
......@@ -2742,6 +2742,58 @@ do { \
}
r -= eth_entries;
for_each_port(adap, j) {
struct port_info *pi = adap2pinfo(adap, j);
const struct sge_eth_rxq *rx;
mutex_lock(&pi->vi_mirror_mutex);
if (!pi->vi_mirror_count) {
mutex_unlock(&pi->vi_mirror_mutex);
continue;
}
if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) {
r -= DIV_ROUND_UP(pi->nmirrorqsets, 4);
mutex_unlock(&pi->vi_mirror_mutex);
continue;
}
rx = &s->mirror_rxq[j][r * 4];
n = min(4, pi->nmirrorqsets - 4 * r);
S("QType:", "Mirror-Rxq");
S("Interface:",
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
RL("RxPackets:", stats.pkts);
RL("RxCSO:", stats.rx_cso);
RL("VLANxtract:", stats.vlan_ex);
RL("LROmerged:", stats.lro_merged);
RL("LROpackets:", stats.lro_pkts);
RL("RxDrops:", stats.rx_drops);
RL("RxBadPkts:", stats.bad_rx_pkts);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
RL("FLMapErr:", fl.mapping_err);
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
mutex_unlock(&pi->vi_mirror_mutex);
goto out;
}
if (!adap->tc_mqprio)
goto skip_mqprio;
......@@ -3098,9 +3150,10 @@ do { \
return 0;
}
static int sge_queue_entries(const struct adapter *adap)
static int sge_queue_entries(struct adapter *adap)
{
int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
int mirror_rxq_entries = 0;
if (adap->tc_mqprio) {
struct cxgb4_tc_port_mqprio *port_mqprio;
......@@ -3123,6 +3176,15 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
mutex_lock(&pi->vi_mirror_mutex);
if (pi->vi_mirror_count)
mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4);
mutex_unlock(&pi->vi_mirror_mutex);
}
if (!is_uld(adap))
goto lld_only;
......@@ -3137,7 +3199,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex);
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries +
eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
......
......@@ -435,8 +435,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
}
......@@ -503,15 +503,16 @@ int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
*/
static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
unsigned int mb = pi->adapter->pf;
unsigned int mb = pi->adapter->mbox;
int ret;
/*
* We do not set address filters and promiscuity here, the stack does
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0)
ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
......@@ -822,6 +823,31 @@ static void adap_config_hpfilter(struct adapter *adapter)
"HP filter region isn't supported by FW\n");
}
static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
u16 rss_size, u16 viid)
{
struct adapter *adap = pi->adapter;
int ret;
ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
rss_size);
if (ret)
return ret;
/* If Tunnel All Lookup isn't specified in the global RSS
* Configuration, then we need to specify a default Ingress
* Queue for any ingress packets which aren't hashed. We'll
* use our first ingress queue ...
*/
return t4_config_vi_rss(adap, adap->mbox, viid,
FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_UDPEN_F,
rss[0]);
}
/**
* cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
......@@ -833,10 +859,10 @@ static void adap_config_hpfilter(struct adapter *adapter)
*/
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
struct adapter *adapter = pi->adapter;
const struct sge_eth_rxq *rxq;
int i, err;
u16 *rss;
rxq = &adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
......@@ -847,21 +873,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
for (i = 0; i < pi->rss_size; i++, queues++)
rss[i] = rxq[*queues].rspq.abs_id;
err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
/* If Tunnel All Lookup isn't specified in the global RSS
* Configuration, then we need to specify a default Ingress
* Queue for any ingress packets which aren't hashed. We'll
* use our first ingress queue ...
*/
if (!err)
err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_UDPEN_F,
rss[0]);
err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
kfree(rss);
return err;
}
......@@ -1259,15 +1271,15 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
const struct port_info *pi = netdev_priv(dev);
int err;
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-1, -1, -1,
err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, -1, -1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
......@@ -1285,6 +1297,292 @@ static int setup_debugfs(struct adapter *adap)
return 0;
}
static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
struct sge_eth_rxq *mirror_rxq)
{
if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
!(adap->flags & CXGB4_SHUTTING_DOWN))
cxgb4_quiesce_rx(&mirror_rxq->rspq);
if (adap->flags & CXGB4_USING_MSIX) {
cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
mirror_rxq->msix->aff_mask);
free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
}
free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}
static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct sge_eth_rxq *mirror_rxq;
struct sge *s = &adap->sge;
int ret = 0, msix = 0;
u16 i, rxqid;
u16 *rss;
if (!pi->vi_mirror_count)
return 0;
if (s->mirror_rxq[pi->port_id])
return 0;
mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
if (!mirror_rxq)
return -ENOMEM;
s->mirror_rxq[pi->port_id] = mirror_rxq;
if (!(adap->flags & CXGB4_USING_MSIX))
msix = -((int)adap->sge.intrq.abs_id + 1);
for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
mirror_rxq = &s->mirror_rxq[pi->port_id][i];
/* Allocate Mirror Rxqs */
if (msix >= 0) {
msix = cxgb4_get_msix_idx_from_bmap(adap);
if (msix < 0) {
ret = msix;
goto out_free_queues;
}
mirror_rxq->msix = &adap->msix_info[msix];
snprintf(mirror_rxq->msix->desc,
sizeof(mirror_rxq->msix->desc),
"%s-mirrorrxq%d", dev->name, i);
}
init_rspq(adap, &mirror_rxq->rspq,
CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
dev, msix, &mirror_rxq->fl,
t4_ethrx_handler, NULL, 0);
if (ret)
goto out_free_msix_idx;
/* Setup MSI-X vectors for Mirror Rxqs */
if (adap->flags & CXGB4_USING_MSIX) {
ret = request_irq(mirror_rxq->msix->vec,
t4_sge_intr_msix, 0,
mirror_rxq->msix->desc,
&mirror_rxq->rspq);
if (ret)
goto out_free_rxq;
cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
&mirror_rxq->msix->aff_mask, i);
}
/* Start NAPI for Mirror Rxqs */
cxgb4_enable_rx(adap, &mirror_rxq->rspq);
}
/* Setup RSS for Mirror Rxqs */
rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!rss) {
ret = -ENOMEM;
goto out_free_queues;
}
mirror_rxq = &s->mirror_rxq[pi->port_id][0];
for (i = 0; i < pi->rss_size; i++)
rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
kfree(rss);
if (ret)
goto out_free_queues;
return 0;
out_free_rxq:
free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
out_free_msix_idx:
cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
out_free_queues:
while (rxqid-- > 0)
cxgb4_port_mirror_free_rxq(adap,
&s->mirror_rxq[pi->port_id][rxqid]);
kfree(s->mirror_rxq[pi->port_id]);
s->mirror_rxq[pi->port_id] = NULL;
return ret;
}
static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct sge *s = &adap->sge;
u16 i;
if (!pi->vi_mirror_count)
return;
if (!s->mirror_rxq[pi->port_id])
return;
for (i = 0; i < pi->nmirrorqsets; i++)
cxgb4_port_mirror_free_rxq(adap,
&s->mirror_rxq[pi->port_id][i]);
kfree(s->mirror_rxq[pi->port_id]);
s->mirror_rxq[pi->port_id] = NULL;
}
static int cxgb4_port_mirror_start(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
int ret, idx = -1;
if (!pi->vi_mirror_count)
return 0;
/* Mirror VIs can be created dynamically after stack had
* already setup Rx modes like MTU, promisc, allmulti, etc.
* on main VI. So, parse what the stack had setup on the
* main VI and update the same on the mirror VI.
*/
ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret) {
dev_err(adap->pdev_dev,
"Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret);
return ret;
}
/* Enable replication bit for the device's MAC address
* in MPS TCAM, so that the packets for the main VI are
* replicated to mirror VI.
*/
ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
dev->dev_addr, true, NULL);
if (ret) {
dev_err(adap->pdev_dev,
"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret);
return ret;
}
/* Enabling a Virtual Interface can result in an interrupt
* during the processing of the VI Enable command and, in some
* paths, result in an attempt to issue another command in the
* interrupt context. Thus, we disable interrupts during the
* course of the VI Enable command ...
*/
local_bh_disable();
ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
false);
local_bh_enable();
if (ret)
dev_err(adap->pdev_dev,
"Failed starting Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret);
return ret;
}
static void cxgb4_port_mirror_stop(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
if (!pi->vi_mirror_count)
return;
t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
false);
}
int cxgb4_port_mirror_alloc(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
int ret = 0;
if (!pi->nmirrorqsets)
return -EOPNOTSUPP;
mutex_lock(&pi->vi_mirror_mutex);
if (pi->viid_mirror) {
pi->vi_mirror_count++;
goto out_unlock;
}
ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
&pi->viid_mirror);
if (ret)
goto out_unlock;
pi->vi_mirror_count = 1;
if (adap->flags & CXGB4_FULL_INIT_DONE) {
ret = cxgb4_port_mirror_alloc_queues(dev);
if (ret)
goto out_free_vi;
ret = cxgb4_port_mirror_start(dev);
if (ret)
goto out_free_queues;
}
mutex_unlock(&pi->vi_mirror_mutex);
return 0;
out_free_queues:
cxgb4_port_mirror_free_queues(dev);
out_free_vi:
pi->vi_mirror_count = 0;
t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
pi->viid_mirror = 0;
out_unlock:
mutex_unlock(&pi->vi_mirror_mutex);
return ret;
}
void cxgb4_port_mirror_free(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
mutex_lock(&pi->vi_mirror_mutex);
if (!pi->viid_mirror)
goto out_unlock;
if (pi->vi_mirror_count > 1) {
pi->vi_mirror_count--;
goto out_unlock;
}
cxgb4_port_mirror_stop(dev);
cxgb4_port_mirror_free_queues(dev);
pi->vi_mirror_count = 0;
t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
pi->viid_mirror = 0;
out_unlock:
mutex_unlock(&pi->vi_mirror_mutex);
}
/*
* upper-layer driver support
*/
......@@ -2557,8 +2855,29 @@ int cxgb_open(struct net_device *dev)
return err;
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
if (err)
return err;
if (pi->nmirrorqsets) {
mutex_lock(&pi->vi_mirror_mutex);
err = cxgb4_port_mirror_alloc_queues(dev);
if (err)
goto out_unlock;
err = cxgb4_port_mirror_start(dev);
if (err)
goto out_free_queues;
mutex_unlock(&pi->vi_mirror_mutex);
}
netif_tx_start_all_queues(dev);
return 0;
out_free_queues:
cxgb4_port_mirror_free_queues(dev);
out_unlock:
mutex_unlock(&pi->vi_mirror_mutex);
return err;
}
......@@ -2576,7 +2895,17 @@ int cxgb_close(struct net_device *dev)
cxgb4_dcb_reset(dev);
dcb_tx_queue_prio_enable(dev, false);
#endif
return ret;
if (ret)
return ret;
if (pi->nmirrorqsets) {
mutex_lock(&pi->vi_mirror_mutex);
cxgb4_port_mirror_stop(dev);
cxgb4_port_mirror_free_queues(dev);
mutex_unlock(&pi->vi_mirror_mutex);
}
return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
......@@ -2842,11 +3171,11 @@ static void cxgb_set_rxmode(struct net_device *dev)
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
struct port_info *pi = netdev_priv(dev);
int ret;
ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
return ret;
......@@ -5504,6 +5833,19 @@ static int cfg_queues(struct adapter *adap)
avail_qsets -= s->eoqsets;
}
/* Mirror queues must follow same scheme as normal Ethernet
* Queues, when there are enough queues available. Otherwise,
* allocate at least 1 queue per port. If even 1 queue is not
* available, then disable mirror queues support.
*/
if (avail_qsets >= s->max_ethqsets)
s->mirrorqsets = s->max_ethqsets;
else if (avail_qsets >= adap->params.nports)
s->mirrorqsets = adap->params.nports;
else
s->mirrorqsets = 0;
avail_qsets -= s->mirrorqsets;
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
......@@ -5617,8 +5959,8 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
static int enable_msix(struct adapter *adap)
{
u32 eth_need, uld_need = 0, ethofld_need = 0;
u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
u8 num_uld = 0, nchan = adap->params.nports;
u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
......@@ -5649,6 +5991,12 @@ static int enable_msix(struct adapter *adap)
need += ethofld_need;
}
if (s->mirrorqsets) {
want += s->mirrorqsets;
mirror_need = nchan;
need += mirror_need;
}
want += EXTRA_VECS;
need += EXTRA_VECS;
......@@ -5682,8 +6030,10 @@ static int enable_msix(struct adapter *adap)
adap->params.ethofld = 0;
s->ofldqsets = 0;
s->eoqsets = 0;
s->mirrorqsets = 0;
uld_need = 0;
ethofld_need = 0;
mirror_need = 0;
}
num_vec = allocated;
......@@ -5697,6 +6047,8 @@ static int enable_msix(struct adapter *adap)
ofldqsets = nchan;
if (is_ethofld(adap))
eoqsets = ethofld_need;
if (s->mirrorqsets)
mirrorqsets = mirror_need;
num_vec -= need;
while (num_vec) {
......@@ -5728,12 +6080,25 @@ static int enable_msix(struct adapter *adap)
num_vec -= uld_need;
}
}
if (s->mirrorqsets) {
while (num_vec) {
if (num_vec < mirror_need ||
mirrorqsets > s->mirrorqsets)
break;
mirrorqsets++;
num_vec -= mirror_need;
}
}
} else {
ethqsets = s->max_ethqsets;
if (is_uld(adap))
ofldqsets = s->ofldqsets;
if (is_ethofld(adap))
eoqsets = s->eoqsets;
if (s->mirrorqsets)
mirrorqsets = s->mirrorqsets;
}
if (ethqsets < s->max_ethqsets) {
......@@ -5749,6 +6114,15 @@ static int enable_msix(struct adapter *adap)
if (is_ethofld(adap))
s->eoqsets = eoqsets;
if (s->mirrorqsets) {
s->mirrorqsets = mirrorqsets;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
pi->nmirrorqsets = s->mirrorqsets / nchan;
mutex_init(&pi->vi_mirror_mutex);
}
}
/* map for msix */
ret = alloc_msix_info(adap, allocated);
if (ret)
......@@ -5760,8 +6134,9 @@ static int enable_msix(struct adapter *adap)
}
dev_info(adap->pdev_dev,
"%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
"%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
s->mirrorqsets);
kfree(entries);
return 0;
......
......@@ -372,6 +372,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
case FLOW_ACTION_DROP:
fs->action = FILTER_DROP;
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
struct net_device *out = act->dev;
struct port_info *pi = netdev_priv(out);
......@@ -529,7 +530,8 @@ static bool valid_pedit_action(struct net_device *dev,
int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_action *actions,
struct netlink_ext_ack *extack)
struct netlink_ext_ack *extack,
u8 matchall_filter)
{
struct flow_action_entry *act;
bool act_redir = false;
......@@ -546,11 +548,19 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
case FLOW_ACTION_DROP:
/* Do nothing */
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
unsigned int i;
bool found = false;
unsigned int i;
if (act->id == FLOW_ACTION_MIRRED &&
!matchall_filter) {
NL_SET_ERR_MSG_MOD(extack,
"Egress mirror action is only supported for tc-matchall");
return -EOPNOTSUPP;
}
target_dev = act->dev;
for_each_port(adap, i) {
......@@ -689,7 +699,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
u8 inet_family;
int fidx, ret;
if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, rule))
......
......@@ -113,7 +113,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_action *actions,
struct netlink_ext_ack *extack);
struct netlink_ext_ack *extack,
u8 matchall_filter);
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
......
......@@ -188,6 +188,49 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct cxgb4_tc_port_matchall *tc_port_matchall;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *act;
int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
flow_action_for_each(i, act, &cls->rule->action) {
if (act->id == FLOW_ACTION_MIRRED) {
ret = cxgb4_port_mirror_alloc(dev);
if (ret) {
NL_SET_ERR_MSG_MOD(extack,
"Couldn't allocate mirror");
return ret;
}
tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
break;
}
}
return 0;
}
static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
struct cxgb4_tc_port_matchall *tc_port_matchall;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (!tc_port_matchall->ingress.viid_mirror)
return;
cxgb4_port_mirror_free(dev);
tc_port_matchall->ingress.viid_mirror = 0;
}
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
......@@ -211,6 +254,10 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
return -ENOMEM;
}
ret = cxgb4_matchall_mirror_alloc(dev, cls);
if (ret)
return ret;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
fs = &tc_port_matchall->ingress.fs;
memset(fs, 0, sizeof(*fs));
......@@ -229,11 +276,15 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
ret = cxgb4_set_filter(dev, fidx, fs);
if (ret)
return ret;
goto out_free;
tc_port_matchall->ingress.tid = fidx;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
out_free:
cxgb4_matchall_mirror_free(dev);
return ret;
}
static int cxgb4_matchall_free_filter(struct net_device *dev)
......@@ -250,6 +301,8 @@ static int cxgb4_matchall_free_filter(struct net_device *dev)
if (ret)
return ret;
cxgb4_matchall_mirror_free(dev);
tc_port_matchall->ingress.packets = 0;
tc_port_matchall->ingress.bytes = 0;
tc_port_matchall->ingress.last_used = 0;
......@@ -279,7 +332,7 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
ret = cxgb4_validate_flow_actions(dev,
&cls_matchall->rule->action,
extack);
extack, 1);
if (ret)
return ret;
......
......@@ -21,6 +21,7 @@ struct cxgb4_matchall_ingress_entry {
enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
u32 tid; /* Index to hardware filter entry */
struct ch_filter_specification fs; /* Filter entry */
u16 viid_mirror; /* Identifier for allocated Mirror VI */
u64 bytes; /* # of bytes hitting the filter */
u64 packets; /* # of packets hitting the filter */
u64 last_used; /* Last updated jiffies time */
......
......@@ -7711,6 +7711,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @viid_mirror: the mirror VI id
* @mtu: the new MTU or -1
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
......@@ -7721,10 +7722,11 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
* Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok)
unsigned int viid_mirror, int mtu, int promisc, int all_multi,
int bcast, int vlanex, bool sleep_ok)
{
struct fw_vi_rxmode_cmd c;
struct fw_vi_rxmode_cmd c, c_mirror;
int ret;
/* convert to FW values */
if (mtu < 0)
......@@ -7749,7 +7751,24 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
if (viid_mirror) {
memcpy(&c_mirror, &c, sizeof(c_mirror));
c_mirror.op_to_viid =
cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
}
ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
if (ret)
return ret;
if (viid_mirror)
ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
NULL, sleep_ok);
return ret;
}
/**
......@@ -9711,6 +9730,22 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
return 0;
}
int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
u16 *mirror_viid)
{
int ret;
ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
NULL, NULL);
if (ret < 0)
return ret;
if (mirror_viid)
*mirror_viid = ret;
return 0;
}
/**
* t4_read_cimq_cfg - read CIM queue configuration
* @adap: the adapter
......