Commit 4bfbe53f authored by David S. Miller

Merge branch 'cxgb4-add-hash-filter-support-to-tc-flower-offload'

Rahul Lakkireddy says:

====================
cxgb4: add hash-filter support to tc-flower offload

This series of patches adds support for creating hash filters, a.k.a.
exact-match filters, via tc-flower offload.  T6 supports creating
~500K hash filters in hardware, and this can theoretically be expanded
up to ~1 million.

Patch 1 fetches and saves the configured hw filter tuple field shifts
and filter mask.

Patch 2 initializes the driver to use hash-filter configuration.

Patch 3 adds support to create hash filters in hw.

Patch 4 adds support to delete hash filters in hw.

Patch 5 adds support to retrieve filter stats for hash filters.

Patch 6 converts the flower table to use rhashtable instead of
static hlist.

Patch 7 finally adds support to create hash filters via tc-flower
offload.
====================
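[ Editor's usage sketch, not part of the original cover letter; assumes
  standard iproute2 tc-flower syntax: a rule whose match fields are fully
  specified, e.g. "tc filter add dev eth0 ingress protocol ip flower
  skip_sw ip_proto tcp src_ip 10.0.0.1 dst_ip 10.0.0.2 src_port 1000
  dst_port 2000 action drop", carries all-ones masks on the 4-tuple, so
  the driver can place it in the large hash (exact-match) region instead
  of the much smaller wild-card TCAM. ]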
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1f2c897c 3eb8b62d
......@@ -287,10 +287,18 @@ struct tp_params {
* places we store their offsets here, or a -1 if the field isn't
* present.
*/
int fcoe_shift;
int port_shift;
int vnic_shift;
int vlan_shift;
int tos_shift;
int protocol_shift;
int ethertype_shift;
int macmatch_shift;
int matchtype_shift;
int frag_shift;
u64 hash_filter_mask;
};
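/* Illustrative sketch (editor's addition, not part of the patch): each
 * shift stored above is the bit position of that field inside the 64-bit
 * Compressed Filter Tuple, so a match value is placed with, e.g.:
 *
 *      if (tp->protocol_shift >= 0)
 *              ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 *
 * A shift of -1 means the field is absent from the configured tuple.
 */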
struct vpd_params {
......@@ -358,6 +366,7 @@ struct adapter_params {
unsigned char crypto; /* HW capability for crypto */
unsigned char bypass;
unsigned char hash_filter;
unsigned int ofldq_wr_cred;
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
......@@ -909,8 +918,10 @@ struct adapter {
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
/* Ethtool Dump */
struct ethtool_dump eth_dump;
......@@ -1041,6 +1052,7 @@ struct ch_filter_specification {
* matching that doesn't exist as a (value, mask) tuple.
*/
uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
u32 hash:1; /* 0 => wild-card, 1 => exact-match */
/* Packet dispatch information. Ingress packets which match the
* filter rules will be dropped, passed to the host or switched back
......@@ -1098,7 +1110,14 @@ enum {
};
enum {
NAT_MODE_NONE = 0, /* No NAT performed */
NAT_MODE_DIP, /* NAT on Dst IP */
NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */
NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */
NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */
NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */
NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */
NAT_MODE_ALL /* NAT on entire 4-tuple */
};
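/* Editor's example (illustrative, not part of the patch): with
 * fs->nat_mode = NAT_MODE_DIP_DP, nat_lip = 192.168.0.1 and
 * nat_lport = 80, matched flows are rewritten to destination
 * 192.168.0.1:80 while their source address and port are left untouched.
 */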
/* Host shadow copy of ingress filter entry. This is in host native format
......@@ -1132,6 +1151,11 @@ static inline int is_offload(const struct adapter *adap)
return adap->params.offload;
}
static inline int is_hashfilter(const struct adapter *adap)
{
return adap->params.hash_filter;
}
static inline int is_pci_uld(const struct adapter *adap)
{
return adap->params.crypto;
......
......@@ -31,10 +31,13 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/ipv6.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "t4_values.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
......@@ -50,6 +53,194 @@ static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
return !(conf & conf_mask) && is_field_set(val, mask);
}
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, u16 word, u64 mask, u64 val,
int no_reply)
{
struct cpl_set_tcb_field *req;
struct sk_buff *skb;
skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req->reply_ctrl = htons(REPLY_CHAN_V(0) |
QUEUENO_V(adap->sge.fw_evtq.abs_id) |
NO_REPLY_V(no_reply));
req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
req->mask = cpu_to_be64(mask);
req->val = cpu_to_be64(val);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
t4_ofld_send(adap, skb);
return 0;
}
/* Set one of the t_flags bits in the TCB.
*/
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, unsigned int bit_pos,
unsigned int val, int no_reply)
{
return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
(unsigned long long)val << bit_pos, no_reply);
}
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
abort_req->rsvd0 = htonl(0);
abort_req->rsvd1 = 0;
abort_req->cmd = CPL_ABORT_NO_RST;
}
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
abort_rpl->rsvd0 = htonl(0);
abort_rpl->rsvd1 = 0;
abort_rpl->cmd = CPL_ABORT_NO_RST;
}
static void mk_set_tcb_ulp(struct filter_entry *f,
struct cpl_set_tcb_field *req,
unsigned int word, u64 mask, u64 val,
u8 cookie, int no_reply)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
QUEUENO_V(0));
req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
req->mask = cpu_to_be64(mask);
req->val = cpu_to_be64(val);
sc = (struct ulptx_idata *)(req + 1);
sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
sc->len = htonl(0);
}
static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (err)
goto smac_err;
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
if (!err)
return 0;
smac_err:
dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
f->tid, err);
return err;
}
static void set_nat_params(struct adapter *adap, struct filter_entry *f,
unsigned int tid, bool dip, bool sip, bool dp,
bool sp)
{
if (dip) {
if (f->fs.type) {
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
WORD_MASK, f->fs.nat_lip[15] |
f->fs.nat_lip[14] << 8 |
f->fs.nat_lip[13] << 16 |
f->fs.nat_lip[12] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
WORD_MASK, f->fs.nat_lip[11] |
f->fs.nat_lip[10] << 8 |
f->fs.nat_lip[9] << 16 |
f->fs.nat_lip[8] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
WORD_MASK, f->fs.nat_lip[7] |
f->fs.nat_lip[6] << 8 |
f->fs.nat_lip[5] << 16 |
f->fs.nat_lip[4] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
f->fs.nat_lip[0] << 24, 1);
} else {
set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
f->fs.nat_lip[0] << 24, 1);
}
}
if (sip) {
if (f->fs.type) {
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
WORD_MASK, f->fs.nat_fip[15] |
f->fs.nat_fip[14] << 8 |
f->fs.nat_fip[13] << 16 |
f->fs.nat_fip[12] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
WORD_MASK, f->fs.nat_fip[11] |
f->fs.nat_fip[10] << 8 |
f->fs.nat_fip[9] << 16 |
f->fs.nat_fip[8] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
WORD_MASK, f->fs.nat_fip[7] |
f->fs.nat_fip[6] << 8 |
f->fs.nat_fip[5] << 16 |
f->fs.nat_fip[4] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
WORD_MASK, f->fs.nat_fip[3] |
f->fs.nat_fip[2] << 8 |
f->fs.nat_fip[1] << 16 |
f->fs.nat_fip[0] << 24, 1);
} else {
set_tcb_field(adap, f, tid,
TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
WORD_MASK, f->fs.nat_fip[3] |
f->fs.nat_fip[2] << 8 |
f->fs.nat_fip[1] << 16 |
f->fs.nat_fip[0] << 24, 1);
}
}
set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
(dp ? f->fs.nat_lport : 0) |
(sp ? f->fs.nat_fport << 16 : 0), 1);
}
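/* Worked example (editor's addition): for an IPv4 rewrite address of
 * 10.1.1.5, f->fs.nat_lip[] holds { 10, 1, 1, 5 } in network order, so
 * the 32-bit TCB word built above is
 *
 *      5 | 1 << 8 | 1 << 16 | 10 << 24 == 0x0a010105
 *
 * which set_tcb_field() then converts to big-endian via cpu_to_be64().
 */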
/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
struct ch_filter_specification *fs)
......@@ -151,7 +342,7 @@ static int get_filter_steerq(struct net_device *dev,
}
static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
unsigned int tcb_base, tcbaddr;
unsigned int word_offset;
......@@ -160,14 +351,24 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx,
int ret;
tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
if (is_hashfilter(adapter) && hash) {
if (fidx < adapter->tids.ntids) {
f = adapter->tids.tid_tab[fidx];
if (!f)
return -EINVAL;
} else {
return -E2BIG;
}
} else {
if ((fidx != (adapter->tids.nftids +
adapter->tids.nsftids - 1)) &&
fidx >= adapter->tids.nftids)
return -E2BIG;
f = &adapter->tids.ftid_tab[fidx];
if (!f->valid)
return -EINVAL;
}
tcbaddr = tcb_base + f->tid * TCB_SIZE;
spin_lock(&adapter->win0_lock);
......@@ -219,11 +420,11 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx,
}
int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
			      u64 *hitcnt, u64 *bytecnt, bool hash)
{
struct adapter *adapter = netdev2adap(dev);
	return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}
int cxgb4_get_free_ftid(struct net_device *dev, int family)
......@@ -484,10 +685,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing L2T, SMT, or CLIP entries of the
	 * filter rule.
	 */
if (f->l2t)
cxgb4_l2t_release(f->l2t);
......@@ -495,6 +694,9 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
if (f->fs.hash && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
......@@ -564,6 +766,416 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.fport = ~0;
}
static bool is_addr_all_mask(u8 *ipmask, int family)
{
if (family == AF_INET) {
struct in_addr *addr;
addr = (struct in_addr *)ipmask;
if (addr->s_addr == 0xffffffff)
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ipmask;
if (addr6->s6_addr32[0] == 0xffffffff &&
addr6->s6_addr32[1] == 0xffffffff &&
addr6->s6_addr32[2] == 0xffffffff &&
addr6->s6_addr32[3] == 0xffffffff)
return true;
}
return false;
}
static bool is_inaddr_any(u8 *ip, int family)
{
int addr_type;
if (family == AF_INET) {
struct in_addr *addr;
addr = (struct in_addr *)ip;
if (addr->s_addr == htonl(INADDR_ANY))
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ip;
		addr_type = ipv6_addr_type(addr6);
if (addr_type == IPV6_ADDR_ANY)
return true;
}
return false;
}
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs)
{
struct tp_params *tp = &adap->params.tp;
u64 hash_filter_mask = tp->hash_filter_mask;
u32 mask;
if (!is_hashfilter(adap))
return false;
if (fs->type) {
if (is_inaddr_any(fs->val.fip, AF_INET6) ||
!is_addr_all_mask(fs->mask.fip, AF_INET6))
return false;
if (is_inaddr_any(fs->val.lip, AF_INET6) ||
!is_addr_all_mask(fs->mask.lip, AF_INET6))
return false;
} else {
if (is_inaddr_any(fs->val.fip, AF_INET) ||
!is_addr_all_mask(fs->mask.fip, AF_INET))
return false;
if (is_inaddr_any(fs->val.lip, AF_INET) ||
!is_addr_all_mask(fs->mask.lip, AF_INET))
return false;
}
if (!fs->val.lport || fs->mask.lport != 0xffff)
return false;
if (!fs->val.fport || fs->mask.fport != 0xffff)
return false;
if (tp->fcoe_shift >= 0) {
mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
if (mask && !fs->mask.fcoe)
return false;
}
if (tp->port_shift >= 0) {
mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
if (mask && !fs->mask.iport)
return false;
}
if (tp->vnic_shift >= 0) {
mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
if ((adap->params.tp.ingress_config & VNIC_F)) {
if (mask && !fs->mask.pfvf_vld)
return false;
} else {
if (mask && !fs->mask.ovlan_vld)
return false;
}
}
if (tp->vlan_shift >= 0) {
mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
if (mask && !fs->mask.ivlan)
return false;
}
if (tp->tos_shift >= 0) {
mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
if (mask && !fs->mask.tos)
return false;
}
if (tp->protocol_shift >= 0) {
mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
if (mask && !fs->mask.proto)
return false;
}
if (tp->ethertype_shift >= 0) {
mask = (hash_filter_mask >> tp->ethertype_shift) &
FT_ETHERTYPE_W;
if (mask && !fs->mask.ethtype)
return false;
}
if (tp->macmatch_shift >= 0) {
mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
if (mask && !fs->mask.macidx)
return false;
}
if (tp->matchtype_shift >= 0) {
mask = (hash_filter_mask >> tp->matchtype_shift) &
FT_MPSHITTYPE_W;
if (mask && !fs->mask.matchtype)
return false;
}
if (tp->frag_shift >= 0) {
mask = (hash_filter_mask >> tp->frag_shift) &
FT_FRAGMENTATION_W;
if (mask && !fs->mask.frag)
return false;
}
return true;
}
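/* Editor's example (illustrative): an IPv4 spec qualifies as an exact
 * match only when both addresses and both ports are fully specified,
 * i.e. non-zero fs->val.lip/fip with fs->mask.lip = fs->mask.fip =
 * 255.255.255.255, and non-zero fs->val.lport/fport with
 * fs->mask.lport = fs->mask.fport = 0xffff, plus a value for every
 * extra tuple field enabled in the hardware hash-filter mask. Anything
 * less falls back to a wild-card (TCAM) filter.
 */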
static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
struct tp_params *tp = &adap->params.tp;
u64 ntuple = 0;
/* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && fs->mask.ivlan)
ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
if (tp->port_shift >= 0 && fs->mask.iport)
ntuple |= (u64)fs->val.iport << tp->port_shift;
if (tp->protocol_shift >= 0) {
if (!fs->val.proto)
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
else
ntuple |= (u64)fs->val.proto << tp->protocol_shift;
}
if (tp->tos_shift >= 0 && fs->mask.tos)
ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
if (tp->vnic_shift >= 0) {
if ((adap->params.tp.ingress_config & VNIC_F) &&
fs->mask.pfvf_vld)
ntuple |= (u64)((fs->val.pfvf_vld << 16) |
(fs->val.pf << 13) |
(fs->val.vf)) << tp->vnic_shift;
else
ntuple |= (u64)((fs->val.ovlan_vld << 16) |
(fs->val.ovlan)) << tp->vnic_shift;
}
if (tp->macmatch_shift >= 0 && fs->mask.macidx)
ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
if (tp->frag_shift >= 0 && fs->mask.frag)
ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
return ntuple;
}
static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
unsigned int qid_filterid, struct adapter *adap)
{
struct cpl_t6_act_open_req6 *t6req = NULL;
struct cpl_act_open_req6 *req = NULL;
t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
INIT_TP_WR(t6req, 0);
req = (struct cpl_act_open_req6 *)t6req;
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
req->local_port = cpu_to_be16(f->fs.val.lport);
req->peer_port = cpu_to_be16(f->fs.val.fport);
req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
DELACK_V(f->fs.hitcnts) |
L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
SMAC_SEL_V((cxgb4_port_viid(f->dev) &
0x7F) << 1) |
TX_CHAN_V(f->fs.eport) |
NO_CONG_V(f->fs.rpttid) |
ULP_MODE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP : ULP_MODE_NONE) |
TCAM_BYPASS_F | NON_OFFLOAD_F);
t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
f->dev)));
t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_F |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
((f->fs.dirsteerhash) << 1)) |
CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
unsigned int qid_filterid, struct adapter *adap)
{
struct cpl_t6_act_open_req *t6req = NULL;
struct cpl_act_open_req *req = NULL;
t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
INIT_TP_WR(t6req, 0);
req = (struct cpl_act_open_req *)t6req;
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
req->local_port = cpu_to_be16(f->fs.val.lport);
req->peer_port = cpu_to_be16(f->fs.val.fport);
req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
DELACK_V(f->fs.hitcnts) |
L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
SMAC_SEL_V((cxgb4_port_viid(f->dev) &
0x7F) << 1) |
TX_CHAN_V(f->fs.eport) |
NO_CONG_V(f->fs.rpttid) |
ULP_MODE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP : ULP_MODE_NONE) |
TCAM_BYPASS_F | NON_OFFLOAD_F);
t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
f->dev)));
t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_F |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
((f->fs.dirsteerhash) << 1)) |
CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
struct tid_info *t = &adapter->tids;
struct filter_entry *f;
struct sk_buff *skb;
int iq, atid, size;
int ret = 0;
u32 iconf;
fill_default_mask(fs);
ret = validate_filter(dev, fs);
if (ret)
return ret;
iq = get_filter_steerq(dev, fs);
if (iq < 0)
return iq;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return -ENOMEM;
f->fs = *fs;
f->ctx = ctx;
f->dev = dev;
f->fs.iq = iq;
/* If the new filter requires loopback Destination MAC and/or VLAN
* rewriting then we need to allocate a Layer 2 Table (L2T) entry for
* the filter.
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
f->fs.eport, f->fs.dmac);
if (!f->l2t) {
ret = -ENOMEM;
goto out_err;
}
}
/* If the new filter requires loopback Source MAC rewriting then
* we need to allocate a SMT entry for the filter.
*/
if (f->fs.newsmac) {
f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
if (!f->smt) {
if (f->l2t) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
}
ret = -ENOMEM;
goto free_l2t;
}
}
atid = cxgb4_alloc_atid(t, f);
if (atid < 0)
goto free_smt;
iconf = adapter->params.tp.ingress_config;
if (iconf & VNIC_F) {
f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
f->fs.val.ovlan_vld = fs->val.pfvf_vld;
f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
}
size = sizeof(struct cpl_t6_act_open_req);
if (f->fs.type) {
ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
if (ret)
goto free_atid;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto free_clip;
}
mk_act_open_req6(f, skb,
((adapter->sge.fw_evtq.abs_id << 14) | atid),
adapter);
} else {
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto free_atid;
}
mk_act_open_req(f, skb,
((adapter->sge.fw_evtq.abs_id << 14) | atid),
adapter);
}
f->pending = 1;
set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
t4_ofld_send(adapter, skb);
return 0;
free_clip:
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
free_atid:
cxgb4_free_atid(t, atid);
free_smt:
if (f->smt) {
cxgb4_smt_release(f->smt);
f->smt = NULL;
}
free_l2t:
if (f->l2t) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
}
out_err:
kfree(f);
return ret;
}
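/* Editor's note: the CPL_ACT_OPEN_REQ[6] sent above completes
 * asynchronously; the firmware's CPL_ACT_OPEN_RPL is dispatched to
 * hash_filter_rpl() below, which promotes the atid to a real tid and
 * programs the remaining TCB fields via configure_filter_tcb().
 */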
/* Check a Chelsio Filter Request for validity, convert it into our internal
* format and send it to the hardware. Return 0 on success, an error number
* otherwise. We attach any provided filter operation context to the internal
......@@ -580,6 +1192,14 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
u32 iconf;
int iq, ret;
if (fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_set_hash_filter(dev, fs, ctx);
netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
__func__);
return -EINVAL;
}
max_fidx = adapter->tids.nftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
......@@ -701,12 +1321,74 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return ret;
}
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
struct tid_info *t = &adapter->tids;
struct cpl_abort_req *abort_req;
struct cpl_abort_rpl *abort_rpl;
struct cpl_set_tcb_field *req;
struct ulptx_idata *aligner;
struct work_request_hdr *wr;
struct filter_entry *f;
struct sk_buff *skb;
unsigned int wrlen;
int ret;
netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
__func__, filter_id, adapter->tids.nftids);
if (filter_id > adapter->tids.ntids)
return -E2BIG;
f = lookup_tid(t, filter_id);
if (!f) {
netdev_err(dev, "%s: no filter entry for filter_id = %d",
__func__, filter_id);
return -EINVAL;
}
ret = writable_filter(f);
if (ret)
return ret;
if (!f->valid)
return -EINVAL;
f->ctx = ctx;
f->pending = 1;
wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
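	/* Editor's note (a reading of the code above, not normative): the
	 * single work request packs three ULP_TX sub-commands back to back,
	 * namely a SET_TCB_FIELD that re-points the filter's RSS reply
	 * queue, then an ABORT_REQ and a pre-built ABORT_RPL that tear down
	 * the hash-filter TCB; the ulptx_idata "aligner" pads SET_TCB_FIELD
	 * out to the 16-byte boundary that the roundup() above accounts for.
	 */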
skb = alloc_skb(wrlen, GFP_KERNEL);
if (!skb) {
netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
INIT_ULPTX_WR(req, wrlen, 0, 0);
wr = (struct work_request_hdr *)req;
wr++;
req = (struct cpl_set_tcb_field *)wr;
mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
aligner = (struct ulptx_idata *)(req + 1);
abort_req = (struct cpl_abort_req *)(aligner + 1);
mk_abort_req_ulp(abort_req, f->tid);
abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
mk_abort_rpl_ulp(abort_rpl, f->tid);
t4_ofld_send(adapter, skb);
return 0;
}
/* Check a delete filter request for validity and send it to the hardware.
* Return 0 on success, an error number otherwise. We attach any provided
* filter operation context to the internal filter specification in order to
* facilitate signaling completion of the operation.
*/
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
......@@ -714,6 +1396,14 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
unsigned int max_fidx;
int ret;
if (fs && fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_del_hash_filter(dev, filter_id, ctx);
netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
__func__);
return -EINVAL;
}
max_fidx = adapter->tids.nftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
......@@ -764,18 +1454,19 @@ int cxgb4_set_filter(struct net_device *dev, int filter_id,
return ret;
}
int cxgb4_del_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs)
{
struct filter_ctx ctx;
int ret;
/* If we are shutting down the adapter do not wait for completion */
if (netdev2adap(dev)->flags & SHUTTING_DOWN)
return __cxgb4_del_filter(dev, filter_id, fs, NULL);
init_completion(&ctx.completion);
ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
if (ret)
goto out;
......@@ -789,60 +1480,155 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id)
return ret;
}
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
				struct filter_entry *f)
{
if (f->fs.hitcnts)
set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
TCB_TIMESTAMP_V(0ULL) |
TCB_RTT_TS_RECENT_AGE_V(0ULL),
1);
if (f->fs.newdmac)
set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
1);
if (f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE)
set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
1);
if (f->fs.newsmac)
configure_filter_smac(adap, f);
if (f->fs.nat_mode) {
switch (f->fs.nat_mode) {
case NAT_MODE_DIP:
set_nat_params(adap, f, tid, true, false, false, false);
break;
case NAT_MODE_DIP_DP:
set_nat_params(adap, f, tid, true, false, true, false);
break;
case NAT_MODE_DIP_DP_SIP:
set_nat_params(adap, f, tid, true, true, true, false);
break;
case NAT_MODE_DIP_DP_SP:
set_nat_params(adap, f, tid, true, false, true, true);
break;
case NAT_MODE_SIP_SP:
set_nat_params(adap, f, tid, false, true, false, true);
break;
case NAT_MODE_DIP_SIP_SP:
set_nat_params(adap, f, tid, true, true, false, true);
break;
case NAT_MODE_ALL:
set_nat_params(adap, f, tid, true, true, true, true);
break;
default:
pr_err("%s: Invalid NAT mode: %d\n",
__func__, f->fs.nat_mode);
return -EINVAL;
}
}
return 0;
}
void hash_del_filter_rpl(struct adapter *adap,
			 const struct cpl_abort_rpl_rss *rpl)
{
unsigned int status = rpl->status;
struct tid_info *t = &adap->tids;
unsigned int tid = GET_TID(rpl);
struct filter_ctx *ctx = NULL;
struct filter_entry *f;
dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
__func__, status, tid);
f = lookup_tid(t, tid);
if (!f) {
dev_err(adap->pdev_dev, "%s:could not find filter entry",
__func__);
return;
}
ctx = f->ctx;
f->ctx = NULL;
clear_filter(adap, f);
cxgb4_remove_tid(t, 0, tid, 0);
kfree(f);
if (ctx) {
ctx->result = 0;
complete(&ctx->completion);
}
}
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
		__func__, tid, ftid, status);
f = lookup_atid(t, ftid);
if (!f) {
dev_err(adap->pdev_dev, "%s:could not find filter entry",
__func__);
return;
}
ctx = f->ctx;
f->ctx = NULL;
switch (status) {
case CPL_ERR_NONE:
f->tid = tid;
f->pending = 0;
f->valid = 1;
cxgb4_insert_tid(t, f, f->tid, 0);
cxgb4_free_atid(t, ftid);
if (ctx) {
ctx->tid = f->tid;
ctx->result = 0;
}
if (configure_filter_tcb(adap, tid, f)) {
clear_filter(adap, f);
cxgb4_remove_tid(t, 0, tid, 0);
kfree(f);
if (ctx) {
ctx->result = -EINVAL;
complete(&ctx->completion);
}
return;
}
break;
default:
dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
__func__, status);
if (ctx) {
if (status == CPL_ERR_TCAM_FULL)
ctx->result = -EAGAIN;
else
ctx->result = -EINVAL;
}
clear_filter(adap, f);
cxgb4_free_atid(t, ftid);
kfree(f);
}
if (ctx)
complete(&ctx->completion);
}
/* Handle a filter write/deletion reply. */
......@@ -915,3 +1701,25 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
complete(&ctx->completion);
}
}
int init_hash_filter(struct adapter *adap)
{
/* On T6, verify the necessary register configs and warn the user in
* case of improper config
*/
if (is_t6(adap->params.chip)) {
if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
goto err;
if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
goto err;
} else {
dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
return -EINVAL;
}
adap->params.hash_filter = 1;
return 0;
err:
dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
return -EINVAL;
}
......@@ -37,7 +37,12 @@
#include "t4_msg.h"
#define WORD_MASK 0xffffffff
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
void hash_del_filter_rpl(struct adapter *adap,
const struct cpl_abort_rpl_rss *rpl);
void clear_filter(struct adapter *adap, struct filter_entry *f);
int set_filter_wr(struct adapter *adapter, int fidx);
......@@ -45,4 +50,7 @@ int delete_filter(struct adapter *adapter, unsigned int fidx);
int writable_filter(struct filter_entry *f);
void clear_all_filters(struct adapter *adapter);
int init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
#endif /* __CXGB4_FILTER_H */
......@@ -572,6 +572,14 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_set_tcb_rpl *p = (void *)rsp;
filter_rpl(q->adap, p);
} else if (opcode == CPL_ACT_OPEN_RPL) {
const struct cpl_act_open_rpl *p = (void *)rsp;
hash_filter_rpl(q->adap, p);
} else if (opcode == CPL_ABORT_RPL_RSS) {
const struct cpl_abort_rpl_rss *p = (void *)rsp;
hash_del_filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
......@@ -3963,7 +3971,8 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
if (caps_cmd.ofldcaps ||
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
......@@ -4000,8 +4009,13 @@ static int adap_init0(struct adapter *adap)
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
if (init_hash_filter(adap) < 0)
goto bye;
} else {
adap->params.offload = 1;
adap->num_ofld_uld += 1;
}
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
......@@ -5168,10 +5182,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev,
"could not offload tc u32, continuing\n");
if (cxgb4_init_tc_flower(adapter))
dev_warn(&pdev->dev,
"could not offload tc flower, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
......
......@@ -38,6 +38,7 @@
#include <net/tc_act/tc_vlan.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
#define STATS_CHECK_PERIOD (HZ / 2)
......@@ -74,13 +75,8 @@ static struct ch_tc_flower_entry *allocate_flower_entry(void)
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
unsigned long flower_cookie)
{
return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
adap->flower_ht_params);
}
static void cxgb4_process_flow_match(struct net_device *dev,
......@@ -677,11 +673,16 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
cxgb4_process_flow_match(dev, cls, fs);
cxgb4_process_flow_actions(dev, cls, fs);
fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) {
fidx = 0;
} else {
fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
if (fidx < 0) {
netdev_err(dev, "%s: No fidx for offload.\n", __func__);
ret = -ENOMEM;
goto free_entry;
}
}
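	/* Editor's note: hash (exact-match) filters live in the adapter's
	 * active region and are identified by the tid returned in the
	 * completion, so no ftid is reserved here; fidx = 0 is only a
	 * placeholder for the set-filter call below.
	 */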
init_completion(&ctx.completion);
......@@ -707,12 +708,17 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
goto free_entry;
}
ch_flower->tc_flower_cookie = cls->cookie;
ch_flower->filter_id = ctx.tid;
ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
adap->flower_ht_params);
if (ret)
goto del_filter;
return 0;
del_filter:
cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
free_entry:
kfree(ch_flower);
......@@ -730,47 +736,70 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
if (!ch_flower)
return -ENOENT;
ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
if (ret)
goto err;
ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
adap->flower_ht_params);
if (ret) {
netdev_err(dev, "Flow remove from rhashtable failed");
goto err;
}
kfree_rcu(ch_flower, rcu);
err:
return ret;
}
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
struct ch_tc_flower_entry *flower_entry;
struct ch_tc_flower_stats *ofld_stats;
struct rhashtable_iter iter;
u64 packets;
u64 bytes;
int ret;
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
		if (IS_ERR(flower_entry))
			goto walk_stop;

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;
				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}
walk_stop:
		rhashtable_walk_stop(&iter);
	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}
static void ch_flower_stats_cb(struct timer_list *t)
{
struct adapter *adap = from_timer(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
int cxgb4_tc_flower_stats(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
......@@ -788,7 +817,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
}
ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
if (ret < 0)
goto err;
......@@ -812,15 +842,35 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
return ret;
}
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
.nelem_hint = 384,
.head_offset = offsetof(struct ch_tc_flower_entry, node),
.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
.max_size = 524288,
.min_size = 512,
.automatic_shrinking = true
};
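/* Editor's note: the TC cookie itself is the rhashtable key, and the
 * table resizes automatically between 512 buckets and ~512K, which
 * suits the ~500K hash filters T6 can hold far better than the old
 * fixed 512-bucket hlist table did.
 */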
int cxgb4_init_tc_flower(struct adapter *adap)
{
int ret;
adap->flower_ht_params = cxgb4_tc_flower_ht_params;
ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
if (ret)
return ret;
INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
if (adap->flower_stats_timer.function)
del_timer_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
}
......@@ -48,7 +48,7 @@ struct ch_tc_flower_entry {
struct ch_filter_specification fs;
struct ch_tc_flower_stats stats;
unsigned long tc_flower_cookie;
struct rhash_head node;
struct rcu_head rcu;
spinlock_t lock; /* lock for stats */
u32 filter_id;
......@@ -115,6 +115,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
int cxgb4_tc_flower_stats(struct net_device *dev,
struct tc_cls_flower_offload *cls);
int cxgb4_init_tc_flower(struct adapter *adap);
void cxgb4_cleanup_tc_flower(struct adapter *adap);
#endif /* __CXGB4_TC_FLOWER_H */
......@@ -380,7 +380,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EINVAL;
}
ret = cxgb4_del_filter(dev, filter_id, NULL);
if (ret)
goto out;
......@@ -399,7 +399,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (!test_bit(j, link->tid_map))
continue;
ret = __cxgb4_del_filter(dev, j, NULL, NULL);
if (ret)
goto out;
......
......@@ -217,12 +217,14 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
int cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs);
int cxgb4_del_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs);
int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
			      u64 *hitcnt, u64 *bytecnt, bool hash);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
......
......@@ -8816,11 +8816,21 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
PROTOCOL_F);
adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
ETHERTYPE_F);
adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
MACMATCH_F);
adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
MPSHITTYPE_F);
adap->params.tp.frag_shift = t4_filter_field_shift(adap,
FRAGMENTATION_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
......@@ -8828,6 +8838,10 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
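	/* Editor's note: the 64-bit T6 hash-filter mask read below is split
	 * across two 32-bit registers; LE_3_DB_HASH_MASK_GEN_IPV4_T6_A
	 * supplies bits 31:0 and LE_4_DB_HASH_MASK_GEN_IPV4_T6_A bits 63:32.
	 */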
v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
adap->params.tp.hash_filter_mask = v;
v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
adap->params.tp.hash_filter_mask |= ((u64)v << 32);
return 0;
}
......
......@@ -286,6 +286,7 @@ struct work_request_hdr {
#define RX_CHANNEL_S 26
#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
#define RX_CHANNEL_F RX_CHANNEL_V(1U)
#define WND_SCALE_EN_S 28
#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
......@@ -315,6 +316,10 @@ struct cpl_pass_open_req {
#define DELACK_V(x) ((x) << DELACK_S)
#define DELACK_F DELACK_V(1U)
#define NON_OFFLOAD_S 7
#define NON_OFFLOAD_V(x) ((x) << NON_OFFLOAD_S)
#define NON_OFFLOAD_F NON_OFFLOAD_V(1U)
#define DSCP_S 22
#define DSCP_M 0x3F
#define DSCP_V(x) ((x) << DSCP_S)
......
......@@ -2933,6 +2933,23 @@
#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
#define LE_DB_RSP_CODE_0_A 0x19c74
#define TCAM_ACTV_HIT_S 0
#define TCAM_ACTV_HIT_M 0x1fU
#define TCAM_ACTV_HIT_V(x) ((x) << TCAM_ACTV_HIT_S)
#define TCAM_ACTV_HIT_G(x) (((x) >> TCAM_ACTV_HIT_S) & TCAM_ACTV_HIT_M)
#define LE_DB_RSP_CODE_1_A 0x19c78
#define HASH_ACTV_HIT_S 25
#define HASH_ACTV_HIT_M 0x1fU
#define HASH_ACTV_HIT_V(x) ((x) << HASH_ACTV_HIT_S)
#define HASH_ACTV_HIT_G(x) (((x) >> HASH_ACTV_HIT_S) & HASH_ACTV_HIT_M)
#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
......
......@@ -42,6 +42,28 @@
#define TCB_T_FLAGS_W 1
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
#define TCB_RSS_INFO_W 3
#define TCB_RSS_INFO_S 0
#define TCB_RSS_INFO_M 0x3ffULL
#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S)
#define TCB_TIMESTAMP_W 5
#define TCB_TIMESTAMP_S 0
#define TCB_TIMESTAMP_M 0xffffffffULL
#define TCB_TIMESTAMP_V(x) ((x) << TCB_TIMESTAMP_S)
#define TCB_RTT_TS_RECENT_AGE_W 6
#define TCB_RTT_TS_RECENT_AGE_S 0
#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL
#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S)
#define TCB_SND_UNA_RAW_W 10
#define TCB_RX_FRAG2_PTR_RAW_W 27
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
#endif /* __T4_TCB_H */
......@@ -1092,6 +1092,7 @@ enum fw_caps_config_switch {
enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
};
enum fw_caps_config_ofld {
......