Commit f2b7e78d authored by Vipul Pandya, committed by Roland Dreier

cxgb4: Add T4 filter support

The T4 architecture is capable of filtering ingress packets at line rate
using rules in the TCAM. If a packet hits a rule in the TCAM, it can either be
dropped or passed to the receive queues based on the rule's settings.

This patch adds a framework for managing filters and for using T4's filter
capabilities. It constructs a Firmware Filter Work Request which writes the
filter at a specified index. It keeps a shadow copy of each ingress filter
entry to check field size limitations and to save memory when the filter
table is large.
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 5bd665f2
@@ -545,6 +545,129 @@ struct adapter {
spinlock_t stats_lock;
};
/* Defined bit width of user definable filter tuples
*/
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16
/* Filter matching rules. These consist of a set of ingress packet field
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
* matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid; however, while it may be easy to
* understand their meanings for some fields (e.g. IP address to match a
* subnet), for others making sensible partial masks is less intuitive (e.g.
* MPS match type) ...
*
* Most of the following data structures are modeled on T4 capabilities.
* Drivers for earlier chips use the subsets which make sense for those chips.
* We really need to come up with a hardware-independent mechanism to
* represent hardware filter capabilities ...
*/
struct ch_filter_tuple {
/* Compressed header matching field rules. The TP_VLAN_PRI_MAP
* register selects which of these fields will participate in the
* filter match rules -- up to a maximum of 36 bits. Because
* TP_VLAN_PRI_MAP is a global register, all filters must use the same
* set of fields.
*/
uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
uint32_t ivlan_vld:1; /* inner VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN valid */
uint32_t pfvf_vld:1; /* PF/VF valid */
uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
uint32_t iport:IPORT_BITWIDTH; /* ingress port */
uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
uint32_t proto:PROTO_BITWIDTH; /* protocol type */
uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
/* Uncompressed header matching field rules. These are always
* available for field rules.
*/
uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
uint16_t lport; /* local port */
uint16_t fport; /* foreign port */
};
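/* Illustrative sketch, not part of this commit: building (value, mask) rules
 * with ch_filter_tuple. A (0, 0) tuple is a wildcard, so zeroing both
 * structures and setting only the fields of interest yields a rule matching,
 * e.g., any foreign IPv4 address in 192.168.0.0/16. The byte order within
 * fip[3:0] is an assumption made for the example.
 */
static inline void example_subnet_tuple(struct ch_filter_tuple *val,
					struct ch_filter_tuple *mask)
{
	memset(val, 0, sizeof(*val));	/* all fields start as "don't care" */
	memset(mask, 0, sizeof(*mask));
	val->fip[0] = 192;		/* IPv4 occupies fip[3:0] */
	val->fip[1] = 168;
	mask->fip[0] = 0xff;		/* compare only the /16 prefix */
	mask->fip[1] = 0xff;
}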
/* A filter ioctl command.
*/
struct ch_filter_specification {
/* Administrative fields for filter.
*/
uint32_t hitcnts:1; /* count filter hits in TCB */
uint32_t prio:1; /* filter has priority over active/server */
/* Fundamental filter typing. This is the one element of filter
* matching that doesn't exist as a (value, mask) tuple.
*/
uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
/* Packet dispatch information. Ingress packets which match the
* filter rules will be dropped, passed to the host or switched back
* out as egress packets.
*/
uint32_t action:2; /* drop, pass, switch */
uint32_t rpttid:1; /* report TID in RSS hash field */
uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
uint32_t iq:10; /* ingress queue */
uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
/* 1 => TCB contains IQ ID */
/* Switch proxy/rewrite fields. An ingress packet which matches a
* filter with "switch" set will be looped back out as an egress
* packet -- potentially with some Ethernet header rewriting.
*/
uint32_t eport:2; /* egress port to switch packet out */
uint32_t newdmac:1; /* rewrite destination MAC address */
uint32_t newsmac:1; /* rewrite source MAC address */
uint32_t newvlan:2; /* rewrite VLAN Tag */
uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
uint8_t smac[ETH_ALEN]; /* new source MAC address */
uint16_t vlan; /* VLAN Tag to insert */
/* Filter rule value/mask pairs.
*/
struct ch_filter_tuple val;
struct ch_filter_tuple mask;
};
enum {
FILTER_PASS = 0, /* default */
FILTER_DROP,
FILTER_SWITCH
};
enum {
VLAN_NOCHANGE = 0, /* default */
VLAN_REMOVE,
VLAN_INSERT,
VLAN_REWRITE
};
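/* Illustrative sketch, not part of this commit: a ch_filter_specification
 * that drops all matching IPv4 TCP packets and counts hits in the TCB,
 * using the FILTER_* action values above. IPPROTO_TCP is assumed visible.
 */
static inline void example_drop_spec(struct ch_filter_specification *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->type = 0;			/* 0 => IPv4 */
	fs->action = FILTER_DROP;	/* drop matching packets */
	fs->hitcnts = 1;		/* count filter hits in the TCB */
	fs->val.proto = IPPROTO_TCP;	/* protocol (value, mask) tuple */
	fs->mask.proto = 0xff;		/* exact 8-bit protocol compare */
}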
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -701,6 +824,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals, unsigned int nregs,
unsigned int start_idx);
struct fw_filter_wr;
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +866,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
......
@@ -97,7 +97,9 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
unsigned int atid_base;
struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
......
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
/* Allocate an L2T entry for use by a switching rule. Such entries need to be
* explicitly freed and while busy they are not on any hash chain, so normal
* address resolution updates do not see them.
*/
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
struct l2t_entry *e;
write_lock_bh(&d->lock);
e = alloc_l2e(d);
if (e) {
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
e->state = L2T_STATE_SWITCHING;
atomic_set(&e->refcnt, 1);
spin_unlock(&e->lock);
}
write_unlock_bh(&d->lock);
return e;
}
/* Sets/updates the contents of a switching L2T entry that has been allocated
* with an earlier call to t4_l2t_alloc_switching().
*/
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
u8 port, u8 *eth_addr)
{
e->vlan = vlan;
e->lport = port;
memcpy(e->dmac, eth_addr, ETH_ALEN);
return write_l2e(adap, e, 0);
}
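/* Illustrative sketch, not part of this commit: pairing the two routines
 * above to prepare an L2T entry for a "switch" filter that sends matching
 * packets back out with VLAN 10 and a rewritten destination MAC.
 * cxgb4_l2t_release() is the driver's existing refcount-drop helper.
 */
static struct l2t_entry *example_switching_l2e(struct adapter *adap,
					       u8 port, u8 *dmac)
{
	struct l2t_entry *e = t4_l2t_alloc_switching(adap->l2t);

	if (!e)
		return NULL;
	if (t4_l2t_set_switching(adap, e, 10, port, dmac)) {
		cxgb4_l2t_release(e);
		return NULL;
	}
	return e;
}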
struct l2t_data *t4_init_l2t(void)
{
int i;
......
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
......
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return 0;
}
/* t4_mk_filtdelwr - create a delete filter WR
* @ftid: the filter ID
* @wr: the filter work request to populate
* @qid: ingress queue to receive the delete notification
*
* Creates a filter work request to delete the supplied filter. If @qid is
* negative the delete notification is suppressed.
*/
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
V_FW_FILTER_WR_NOREPLY(qid < 0));
wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
if (qid >= 0)
wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
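/* Illustrative sketch, not part of this commit: posting a delete-filter
 * work request built by t4_mk_filtdelwr(). t4_ofld_send() is the driver's
 * existing offload transmit helper; queue selection and error unwinding
 * are elided.
 */
static int example_del_filter(struct adapter *adap, unsigned int ftid,
			      int rsp_qid)
{
	struct fw_filter_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	t4_mk_filtdelwr(ftid, fwr, rsp_qid);	/* rsp_qid < 0: no reply */
	return t4_ofld_send(adap, skb);
}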
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
......
@@ -332,6 +332,7 @@ struct cpl_set_tcb_field {
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
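/* Illustrative sketch, not part of this commit: TCB_COOKIE() stores a 3-bit
 * cookie at bit 5 and the new GET_TCB_COOKIE() recovers it, letting a reply
 * be matched back to its request. Minimal round trip:
 */
static inline void example_tcb_cookie(struct cpl_set_tcb_field *req,
				      unsigned int word, unsigned int cookie)
{
	req->word_cookie = htons(TCB_WORD(word) | TCB_COOKIE(cookie));
	WARN_ON(GET_TCB_COOKIE(ntohs(req->word_cookie)) != (cookie & 7));
}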
......