Commit 69625ea7 authored by David S. Miller

Merge branch 'cxgb4-add-support-for-TC-MQPRIO-Qdisc-Offload'

Rahul Lakkireddy says:

====================
cxgb4: add support for TC-MQPRIO Qdisc Offload

This series of patches adds support for offloading the TC-MQPRIO Qdisc
to Chelsio T5/T6 NICs. Offloading QoS traffic shaping and pacing
requires using the Ethernet Offload (ETHOFLD) resources available on
Chelsio NICs. The ETHOFLD resources are configured by firmware
and taken from the resource pool shared with other Chelsio Upper
Layer Drivers. Traffic flowing through the ETHOFLD region requires a
software netdev Tx queue (EOSW_TXQ) exposed to the networking stack,
and an underlying hardware Tx queue (EOHW_TXQ) used for sending
packets through the hardware.

The ETHOFLD region is addressed using EOTIDs, which are per-connection
resources. Hence, EOTIDs are capable of storing only a very small
number of packets in flight. To allow more connections to share
the QoS rate limiting configuration, multiple EOTIDs must be
allocated to reduce packet drops. EOTIDs are mapped 1-to-1 with
software EOSW_TXQs. Several software EOSW_TXQs can post packets to
a single hardware EOHW_TXQ.
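
To make the queue topology concrete, here is a minimal user-space C
sketch of the mapping described above: one EOTID per software EOSW_TXQ,
with several EOSW_TXQs feeding a single hardware EOHW_TXQ. The struct
and the queue counts below are simplified stand-ins for illustration,
not the driver's sge_eosw_txq/sge_eohw_txq definitions.

/* Conceptual sketch of the ETHOFLD queue mapping: each EOTID is tied
 * 1-to-1 to a software EOSW_TXQ, and several EOSW_TXQs feed a single
 * hardware EOHW_TXQ.  Plain user-space C, not the driver code.
 */
#include <stdio.h>

#define NUM_EOSW_TXQ 8   /* software Tx queues, one per EOTID */
#define NUM_EOHW_TXQ 2   /* underlying hardware Tx queues */

struct eosw_txq_model {
    unsigned int eotid;  /* per-connection ETHOFLD TID */
    unsigned int hwqid;  /* index of the backing hardware EOHW_TXQ */
};

int main(void)
{
    struct eosw_txq_model eosw[NUM_EOSW_TXQ];
    unsigned int i;

    for (i = 0; i < NUM_EOSW_TXQ; i++) {
        eosw[i].eotid = i;                 /* 1-to-1 EOTID <-> EOSW_TXQ */
        eosw[i].hwqid = i % NUM_EOHW_TXQ;  /* several EOSW_TXQs -> one EOHW_TXQ */
        printf("EOSW_TXQ %u -> EOTID %u -> EOHW_TXQ %u\n",
               i, eosw[i].eotid, eosw[i].hwqid);
    }
    return 0;
}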

The series is broken down as follows:

Patch 1 queries the firmware for the maximum number of available
traffic classes, as well as the start and maximum available indices
(EOTIDs) into the ETHOFLD region supported by the underlying device.

Patch 2 reworks queue configuration and simplifies the MSI-X
allocation logic in preparation for ETHOFLD queue support.
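
The reworked MSI-X bookkeeping keeps a single bitmap of vector indices
(see cxgb4_get_msix_idx_from_bmap()/cxgb4_free_msix_idx_in_bmap()
further down in the diff). Below is a minimal user-space sketch of that
allocation scheme, assuming a plain flag array instead of the kernel
bitmap API and omitting the spinlock the driver takes around the bitmap.

/* Minimal sketch of bitmap-based MSI-X index allocation, analogous to
 * cxgb4_get_msix_idx_from_bmap()/cxgb4_free_msix_idx_in_bmap().
 * User-space stand-in: a flag array instead of the kernel bitmap API,
 * and no locking.
 */
#include <stdio.h>

#define MSIX_MAPSIZE 32

static unsigned char msix_bmap[MSIX_MAPSIZE];  /* 0 = free, 1 = in use */

static int get_msix_idx_from_bmap(void)
{
    int i;

    for (i = 0; i < MSIX_MAPSIZE; i++) {  /* like find_first_zero_bit() */
        if (!msix_bmap[i]) {
            msix_bmap[i] = 1;             /* like __set_bit() */
            return i;
        }
    }
    return -1;                            /* driver returns -ENOSPC */
}

static void free_msix_idx_in_bmap(int idx)
{
    msix_bmap[idx] = 0;                   /* like __clear_bit() */
}

int main(void)
{
    int a = get_msix_idx_from_bmap();
    int b = get_msix_idx_from_bmap();

    printf("allocated indices %d and %d\n", a, b);
    free_msix_idx_in_bmap(a);
    printf("re-allocated index %d\n", get_msix_idx_from_bmap());
    return 0;
}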

Patch 3 adds the skeleton for validating and configuring TC-MQPRIO
Qdisc offload. It also adds support for software EOSW_TXQs and exposes
them to the network stack, and updates Tx queue selection to use the
fallback NIC Tx path for unsupported traffic that can't go through the
ETHOFLD queues.
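
A rough, purely illustrative user-space model of that Tx queue
selection fallback (the real selection happens in the driver's queue
selection path, whose details are not part of this cover letter):
traffic belonging to an offloaded class is steered to an EOSW_TXQ,
anything else keeps using the regular NIC Tx queues. The constants
below are made up.

/* Illustrative model of the Tx queue selection fallback: traffic that
 * maps to an offloaded MQPRIO class is steered to an ETHOFLD software
 * queue, anything else stays on the normal NIC Tx path.
 */
#include <stdio.h>
#include <stdbool.h>

#define NIC_TXQ_BASE  0
#define EOSW_TXQ_BASE 100   /* arbitrary offset for this sketch */

static int select_txq(bool mqprio_active, int tc, int nic_queues)
{
    if (mqprio_active && tc >= 0)
        return EOSW_TXQ_BASE + tc;   /* ETHOFLD software Tx queue */
    /* fallback NIC Tx path for unsupported traffic */
    return NIC_TXQ_BASE + (tc < 0 ? 0 : tc % nic_queues);
}

int main(void)
{
    printf("offloaded tc 2   -> txq %d\n", select_txq(true, 2, 8));
    printf("unsupported pkt  -> txq %d\n", select_txq(true, -1, 8));
    printf("mqprio disabled  -> txq %d\n", select_txq(false, 3, 8));
    return 0;
}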

Patch 4 adds support for managing the hardware queues used to rate
limit traffic flowing through them. The queues are allocated and
removed when TC-MQPRIO Qdisc offload is enabled and disabled,
respectively.

Patch 5 adds the Tx path for traffic flowing through the software
EOSW_TXQ and hardware EOHW_TXQ. It also adds the Rx path to handle Tx
completions.
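
A compact user-space sketch of the credit-based producer/consumer flow
this implies, loosely modeled on the pidx/cidx/inuse/cred fields of
struct sge_eosw_txq added by the series; the ring size and helper names
are illustrative only, not the driver's implementation.

/* Sketch of the EOSW_TXQ producer/consumer flow: the Tx side consumes
 * a descriptor and a credit per packet; the Rx completion path
 * reclaims descriptors and returns credits.
 */
#include <stdio.h>

#define NDESC 8

struct eosw_txq_model {
    unsigned int pidx;   /* producer index: next descriptor to fill */
    unsigned int cidx;   /* consumer index: next descriptor to reclaim */
    unsigned int inuse;  /* descriptors currently holding packets */
    unsigned int cred;   /* credits left before Tx must pause */
};

static int eosw_tx(struct eosw_txq_model *q)
{
    if (q->inuse == NDESC || !q->cred)
        return -1;                    /* ring full or out of credits */
    q->pidx = (q->pidx + 1) % NDESC;
    q->inuse++;
    q->cred--;
    return 0;
}

static void eosw_tx_completion(struct eosw_txq_model *q, unsigned int ndone)
{
    while (ndone-- && q->inuse) {     /* reclaim completed descriptors */
        q->cidx = (q->cidx + 1) % NDESC;
        q->inuse--;
        q->cred++;
    }
}

int main(void)
{
    struct eosw_txq_model q = { .cred = NDESC };
    unsigned int sent = 0;

    while (!eosw_tx(&q))
        sent++;
    printf("sent %u packets, ring now full\n", sent);
    eosw_tx_completion(&q, 3);
    printf("after completion: inuse=%u cred=%u\n", q.inuse, q.cred);
    return 0;
}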

Patch 6 updates the existing SCHED API to configure FLOWC-based QoS
offload. In the existing QUEUE-based rate limiting, multiple queues
sharing a traffic class get the aggregated max rate limit value.
In FLOWC-based rate limiting, on the other hand, multiple queues
sharing a traffic class each get their own individual max rate limit
value. For example, if 2 queues are bound to class 0, which is rate
limited to 1 Gbps, then with QUEUE-based rate limiting both queues
together get an aggregate max output of 1 Gbps. With FLOWC-based
rate limiting, each queue gets its own max output of 1 Gbps;
i.e. 2 queues * 1 Gbps rate limit = 2 Gbps max output.
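
The following user-space sketch contrasts the two modes, mirroring the
SCHED_CLASS_MODE_CLASS vs SCHED_CLASS_MODE_FLOW distinction introduced
by this patch; the struct and numbers are simplified stand-ins, not the
driver's data structures.

/* Sketch contrasting QUEUE-based (per-class aggregate) and FLOWC-based
 * (per-flow individual) rate limiting.  Numbers are illustrative only.
 */
#include <stdio.h>

enum sched_mode { MODE_CLASS, MODE_FLOW };  /* per-class vs per-flow */

struct sched_class_model {
    enum sched_mode mode;
    unsigned int max_rate_gbps;   /* configured class rate limit */
    unsigned int nbound;          /* queues (CLASS) or flows (FLOW) bound */
};

static unsigned int max_total_output(const struct sched_class_model *c)
{
    /* CLASS mode: one aggregate limit; FLOW mode: each flow gets its own */
    return c->mode == MODE_CLASS ? c->max_rate_gbps
                                 : c->max_rate_gbps * c->nbound;
}

int main(void)
{
    struct sched_class_model queue_based = { MODE_CLASS, 1, 2 };
    struct sched_class_model flowc_based = { MODE_FLOW, 1, 2 };

    printf("QUEUE based: 2 queues in a 1 Gbps class -> %u Gbps total\n",
           max_total_output(&queue_based));
    printf("FLOWC based: 2 flows in a 1 Gbps class  -> %u Gbps total\n",
           max_total_output(&flowc_based));
    return 0;
}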
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c8dd9cb 0e395b3c
...@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o ...@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
cudbg_common.o cudbg_lib.o cudbg_zlib.o cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
......
...@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype { ...@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
CUDBG_QTYPE_CRYPTO_FLQ, CUDBG_QTYPE_CRYPTO_FLQ,
CUDBG_QTYPE_TLS_RXQ, CUDBG_QTYPE_TLS_RXQ,
CUDBG_QTYPE_TLS_FLQ, CUDBG_QTYPE_TLS_FLQ,
CUDBG_QTYPE_ETHOFLD_TXQ,
CUDBG_QTYPE_ETHOFLD_RXQ,
CUDBG_QTYPE_ETHOFLD_FLQ,
CUDBG_QTYPE_MAX, CUDBG_QTYPE_MAX,
}; };
......
...@@ -2930,6 +2930,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, ...@@ -2930,6 +2930,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
MAX_RXQ_DESC_SIZE; MAX_RXQ_DESC_SIZE;
/* ETHOFLD TXQ, RXQ, and FLQ */
tot_entries += MAX_OFLD_QSETS * 3;
tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
tot_size += sizeof(struct cudbg_ver_hdr) + tot_size += sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_qdesc_info) + sizeof(struct cudbg_qdesc_info) +
sizeof(struct cudbg_qdesc_entry) * tot_entries; sizeof(struct cudbg_qdesc_entry) * tot_entries;
...@@ -3087,6 +3091,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, ...@@ -3087,6 +3091,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
} }
} }
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
CUDBG_QTYPE_ETHOFLD_TXQ, out);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
CUDBG_QTYPE_ETHOFLD_RXQ, out);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
CUDBG_QTYPE_ETHOFLD_FLQ, out);
}
out_unlock: out_unlock:
mutex_unlock(&uld_mutex); mutex_unlock(&uld_mutex);
......
...@@ -392,6 +392,7 @@ struct adapter_params { ...@@ -392,6 +392,7 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */ struct arch_specific_params arch; /* chip specific params */
unsigned char offload; unsigned char offload;
unsigned char crypto; /* HW capability for crypto */ unsigned char crypto; /* HW capability for crypto */
unsigned char ethofld; /* QoS support */
unsigned char bypass; unsigned char bypass;
unsigned char hash_filter; unsigned char hash_filter;
...@@ -711,6 +712,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */ ...@@ -711,6 +712,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq; struct sge_rspq rspq;
struct sge_fl fl; struct sge_fl fl;
struct sge_eth_stats stats; struct sge_eth_stats stats;
struct msix_info *msix;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */ struct sge_ofld_stats { /* offload queue statistics */
...@@ -724,6 +726,7 @@ struct sge_ofld_rxq { /* SW offload Rx queue */ ...@@ -724,6 +726,7 @@ struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq; struct sge_rspq rspq;
struct sge_fl fl; struct sge_fl fl;
struct sge_ofld_stats stats; struct sge_ofld_stats stats;
struct msix_info *msix;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct tx_desc { struct tx_desc {
...@@ -788,7 +791,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ ...@@ -788,7 +791,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_uld_rxq_info { struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */ char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */ u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */ u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */ u16 nciq; /* # of completion queues */
...@@ -801,6 +803,55 @@ struct sge_uld_txq_info { ...@@ -801,6 +803,55 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */ u16 ntxq; /* # of egress uld queues */
}; };
enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};
struct sge_eosw_desc {
struct sk_buff *skb; /* SKB to free after getting completion */
dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
};
struct sge_eosw_txq {
spinlock_t lock; /* Per queue lock to synchronize completions */
enum sge_eosw_state state; /* Current ETHOFLD State */
struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */
u32 ndesc; /* Number of descriptors */
u32 pidx; /* Current Producer Index */
u32 last_pidx; /* Last successfully transmitted Producer Index */
u32 cidx; /* Current Consumer Index */
u32 last_cidx; /* Last successfully reclaimed Consumer Index */
u32 flowc_idx; /* Descriptor containing a FLOWC request */
u32 inuse; /* Number of packets held in ring */
u32 cred; /* Current available credits */
u32 ncompl; /* # of completions posted */
u32 last_compl; /* # of credits consumed since last completion req */
u32 eotid; /* Index into EOTID table in software */
u32 hwtid; /* Hardware EOTID index */
u32 hwqid; /* Underlying hardware queue index */
struct net_device *netdev; /* Pointer to netdevice */
struct tasklet_struct qresume_tsk; /* Restarts the queue */
struct completion completion; /* completion for FLOWC rendezvous */
};
struct sge_eohw_txq {
spinlock_t lock; /* Per queue lock */
struct sge_txq q; /* HW Txq */
struct adapter *adap; /* Backpointer to adapter */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
};
struct sge { struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_txq ptptxq; struct sge_eth_txq ptptxq;
...@@ -814,11 +865,16 @@ struct sge { ...@@ -814,11 +865,16 @@ struct sge {
struct sge_rspq intrq ____cacheline_aligned_in_smp; struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock; spinlock_t intrq_lock;
struct sge_eohw_txq *eohw_txq;
struct sge_ofld_rxq *eohw_rxq;
u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */ u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */ u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 eoqsets; /* # of ETHOFLD queues */
u16 timer_val[SGE_NTIMERS]; u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS]; u8 counter_val[SGE_NCOUNTERS];
u16 dbqtimer_tick; u16 dbqtimer_tick;
...@@ -841,6 +897,9 @@ struct sge { ...@@ -841,6 +897,9 @@ struct sge {
unsigned long *blocked_fl; unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */ struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */ struct timer_list tx_timer; /* checks Tx queues */
int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
}; };
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
...@@ -870,13 +929,13 @@ struct hash_mac_addr { ...@@ -870,13 +929,13 @@ struct hash_mac_addr {
unsigned int iface_mac; unsigned int iface_mac;
}; };
struct uld_msix_bmap { struct msix_bmap {
unsigned long *msix_bmap; unsigned long *msix_bmap;
unsigned int mapsize; unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */ spinlock_t lock; /* lock for acquiring bitmap */
}; };
struct uld_msix_info { struct msix_info {
unsigned short vec; unsigned short vec;
char desc[IFNAMSIZ + 10]; char desc[IFNAMSIZ + 10];
unsigned int idx; unsigned int idx;
...@@ -945,14 +1004,9 @@ struct adapter { ...@@ -945,14 +1004,9 @@ struct adapter {
struct cxgb4_virt_res vres; struct cxgb4_virt_res vres;
unsigned int swintr; unsigned int swintr;
struct msix_info { /* MSI-X Info for NIC and OFLD queues */
unsigned short vec; struct msix_info *msix_info;
char desc[IFNAMSIZ + 10]; struct msix_bmap msix_bmap;
cpumask_var_t aff_mask;
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
int msi_idx;
struct doorbell_stats db_stats; struct doorbell_stats db_stats;
struct sge sge; struct sge sge;
...@@ -1044,6 +1098,9 @@ struct adapter { ...@@ -1044,6 +1098,9 @@ struct adapter {
#if IS_ENABLED(CONFIG_THERMAL) #if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal ch_thermal; struct ch_thermal ch_thermal;
#endif #endif
/* TC MQPRIO offload */
struct cxgb4_tc_mqprio *tc_mqprio;
}; };
/* Support for "sched-class" command to allow a TX Scheduling Class to be /* Support for "sched-class" command to allow a TX Scheduling Class to be
...@@ -1077,6 +1134,7 @@ enum { ...@@ -1077,6 +1134,7 @@ enum {
enum { enum {
SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */ SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */
}; };
enum { enum {
...@@ -1100,6 +1158,14 @@ struct ch_sched_queue { ...@@ -1100,6 +1158,14 @@ struct ch_sched_queue {
s8 class; /* class index */ s8 class; /* class index */
}; };
/* Support for "sched_flowc" command to allow one or more FLOWC
* to be bound to a TX Scheduling Class.
*/
struct ch_sched_flowc {
s32 tid; /* TID to bind */
s8 class; /* class index */
};
/* Defined bit width of user definable filter tuples /* Defined bit width of user definable filter tuples
*/ */
#define ETHTYPE_BITWIDTH 16 #define ETHTYPE_BITWIDTH 16
...@@ -1293,6 +1359,11 @@ static inline int is_uld(const struct adapter *adap) ...@@ -1293,6 +1359,11 @@ static inline int is_uld(const struct adapter *adap)
return (adap->params.offload || adap->params.crypto); return (adap->params.offload || adap->params.crypto);
} }
static inline int is_ethofld(const struct adapter *adap)
{
return adap->params.ethofld;
}
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{ {
return readl(adap->regs + reg_addr); return readl(adap->regs + reg_addr);
...@@ -1426,6 +1497,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, ...@@ -1426,6 +1497,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid, struct net_device *dev, unsigned int iqid,
unsigned int uld_type); unsigned int uld_type);
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
struct net_device *dev, u32 iqid);
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie); irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap); int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap); void t4_sge_start(struct adapter *adap);
...@@ -1890,6 +1964,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, ...@@ -1890,6 +1964,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q, void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap); unsigned int n, bool unmap);
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
void cxgb4_ethofld_restart(unsigned long data);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q); void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap, void cxgb4_reclaim_completed_tx(struct adapter *adap,
struct sge_txq *q, bool unmap); struct sge_txq *q, bool unmap);
...@@ -1948,5 +2028,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap, ...@@ -1948,5 +2028,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr, int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx); bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */ #endif /* __CXGB4_H__ */
...@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld) ...@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v) static int sge_qinfo_show(struct seq_file *seq, void *v)
{ {
int eth_entries, ctrl_entries, eo_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 }; int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 }; int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 }; int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
...@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) ...@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
const struct sge_uld_rxq_info *urxq_info; const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private; struct adapter *adap = seq->private;
int i, n, r = (uintptr_t)v - 1; int i, n, r = (uintptr_t)v - 1;
int eth_entries, ctrl_entries;
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
if (adap->sge.eohw_txq)
eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
mutex_lock(&uld_mutex); mutex_lock(&uld_mutex);
if (s->uld_txq_info) if (s->uld_txq_info)
...@@ -2761,6 +2763,54 @@ do { \ ...@@ -2761,6 +2763,54 @@ do { \
} }
r -= eth_entries; r -= eth_entries;
if (r < eo_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
n = min(4, s->eoqsets - 4 * r);
S("QType:", "ETHOFLD");
S("Interface:",
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
RL("RxPackets:", stats.pkts);
RL("RxImm:", stats.imm);
RL("RxAN", stats.an);
RL("RxNoMem", stats.nomem);
TL("TSO:", tso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
TL("TxMapErr:", mapping_err);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
RL("FLMapErr:", fl.mapping_err);
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
goto unlock;
}
r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) { if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx; const struct sge_uld_txq *tx;
...@@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap) ...@@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex); mutex_unlock(&uld_mutex);
return DIV_ROUND_UP(adap->sge.ethqsets, 4) + return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
(adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
} }
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
#include <net/pkt_cls.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024
#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024
#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64
#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5
#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8
#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72
#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ)
enum cxgb4_mqprio_state {
CXGB4_MQPRIO_STATE_DISABLED = 0,
CXGB4_MQPRIO_STATE_ACTIVE,
};
struct cxgb4_tc_port_mqprio {
enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */
struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */
struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */
u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */
};
struct cxgb4_tc_mqprio {
refcount_t refcnt; /* Refcount for adapter-wide resources */
struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
};
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio);
int cxgb4_init_tc_mqprio(struct adapter *adap);
void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
#endif /* __CXGB4_TC_MQPRIO_H__ */
...@@ -53,35 +53,6 @@ ...@@ -53,35 +53,6 @@
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++) #define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
static int get_msix_idx_from_bmap(struct adapter *adap)
{
struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
unsigned long flags;
unsigned int msix_idx;
spin_lock_irqsave(&bmap->lock, flags);
msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
if (msix_idx < bmap->mapsize) {
__set_bit(msix_idx, bmap->msix_bmap);
} else {
spin_unlock_irqrestore(&bmap->lock, flags);
return -ENOSPC;
}
spin_unlock_irqrestore(&bmap->lock, flags);
return msix_idx;
}
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
unsigned long flags;
spin_lock_irqsave(&bmap->lock, flags);
__clear_bit(msix_idx, bmap->msix_bmap);
spin_unlock_irqrestore(&bmap->lock, flags);
}
/* Flush the aggregated lro sessions */ /* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q) static void uldrx_flush_handler(struct sge_rspq *q)
{ {
...@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap, ...@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro) struct sge_uld_rxq_info *rxq_info, bool lro)
{ {
unsigned int nq = rxq_info->nrxq + rxq_info->nciq; unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq; struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id; unsigned short *ids = rxq_info->rspq_id;
int i, err, msi_idx, que_idx = 0;
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
unsigned int per_chan; unsigned int per_chan;
...@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap, ...@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
} }
if (msi_idx >= 0) { if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap); msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
if (bmap_idx < 0) { if (msi_idx < 0) {
err = -ENOSPC; err = -ENOSPC;
goto freeout; goto freeout;
} }
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
snprintf(adap->msix_info[msi_idx].desc,
sizeof(adap->msix_info[msi_idx].desc),
"%s-%s%d",
adap->port[0]->name, rxq_info->name, i);
q->msix = &adap->msix_info[msi_idx];
} }
err = t4_sge_alloc_rxq(adap, &q->rspq, false, err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[que_idx++ / per_chan], adap->port[que_idx++ / per_chan],
...@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap, ...@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
0); 0);
if (err) if (err)
goto freeout; goto freeout;
if (msi_idx >= 0)
rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats)); memset(&q->stats, 0, sizeof(q->stats));
if (ids) if (ids)
ids[i] = q->rspq.abs_id; ids[i] = q->rspq.abs_id;
...@@ -188,6 +164,8 @@ static int alloc_uld_rxqs(struct adapter *adap, ...@@ -188,6 +164,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (q->rspq.desc) if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq, free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL); q->fl.size ? &q->fl : NULL);
if (q->msix)
cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
} }
return err; return err;
} }
...@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) ...@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0; int i, ret = 0;
if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short),
GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */ /* Tell uP to route control queue completions to rdma rspq */
...@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) ...@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq, t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq); rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl);
} }
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
...@@ -355,13 +323,12 @@ static int ...@@ -355,13 +323,12 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{ {
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
struct uld_msix_info *minfo; struct msix_info *minfo;
unsigned int idx;
int err = 0; int err = 0;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) { for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx]; minfo = rxq_info->uldrxq[idx].msix;
minfo = &adap->msix_info_ulds[bmap_idx];
err = request_irq(minfo->vec, err = request_irq(minfo->vec,
t4_sge_intr_msix, 0, t4_sge_intr_msix, 0,
minfo->desc, minfo->desc,
...@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) ...@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
unwind: unwind:
while (idx-- > 0) { while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx]; minfo = rxq_info->uldrxq[idx].msix;
minfo = &adap->msix_info_ulds[bmap_idx];
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx); cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq); free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
} }
return err; return err;
...@@ -389,69 +355,45 @@ static void ...@@ -389,69 +355,45 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{ {
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
struct uld_msix_info *minfo; struct msix_info *minfo;
unsigned int idx, bmap_idx; unsigned int idx;
for_each_uldrxq(rxq_info, idx) { for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx]; minfo = rxq_info->uldrxq[idx].msix;
minfo = &adap->msix_info_ulds[bmap_idx];
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx); cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq); free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
} }
} }
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{ {
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc); int idx;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) { for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx]; struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
}
}
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
if (!q) if (!q)
return; continue;
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
INGRESSQID_V(q->cntxt_id));
}
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) cxgb4_enable_rx(adap, q);
{ }
if (q && q->handler)
napi_disable(&q->napi);
} }
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{ {
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx; int idx;
for_each_uldrxq(rxq_info, idx) for_each_uldrxq(rxq_info, idx) {
enable_rx(adap, &rxq_info->uldrxq[idx].rspq); struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
}
static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) if (!q)
{ continue;
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
for_each_uldrxq(rxq_info, idx) cxgb4_quiesce_rx(q);
quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); }
} }
static void static void
...@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type, ...@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret) if (ret)
goto free_queues; goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) { if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type); ret = request_msix_queue_irqs_uld(adap, type);
if (ret) if (ret)
goto free_rxq; goto free_rxq;
......
...@@ -89,6 +89,10 @@ union aopen_entry { ...@@ -89,6 +89,10 @@ union aopen_entry {
union aopen_entry *next; union aopen_entry *next;
}; };
struct eotid_entry {
void *data;
};
/* /*
* Holds the size, base address, free list start, etc of the TID, server TID, * Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically. * and active-open TID tables. The tables themselves are allocated dynamically.
...@@ -126,6 +130,12 @@ struct tid_info { ...@@ -126,6 +130,12 @@ struct tid_info {
unsigned int v6_stids_in_use; unsigned int v6_stids_in_use;
unsigned int sftids_in_use; unsigned int sftids_in_use;
/* ETHOFLD range */
struct eotid_entry *eotid_tab;
unsigned long *eotid_bmap;
unsigned int eotid_base;
unsigned int neotids;
/* TIDs in the TCAM */ /* TIDs in the TCAM */
atomic_t tids_in_use; atomic_t tids_in_use;
/* TIDs in the HASH */ /* TIDs in the HASH */
...@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data, ...@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
atomic_inc(&t->conns_in_use); atomic_inc(&t->conns_in_use);
} }
static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
u32 eotid)
{
return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
}
static inline int cxgb4_get_free_eotid(struct tid_info *t)
{
int eotid;
eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
if (eotid >= t->neotids)
eotid = -1;
return eotid;
}
static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
set_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = data;
}
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
clear_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = NULL;
}
int cxgb4_alloc_atid(struct tid_info *t, void *data); int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data); int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
......
...@@ -92,45 +92,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, ...@@ -92,45 +92,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf; pf = adap->pf;
vf = 0; vf = 0;
err = t4_set_params(adap, adap->mbox, pf, vf, 1,
&fw_param, &fw_class);
break;
}
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
fe = (struct sched_flowc_entry *)arg;
fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
fe->param.tid, fw_class);
break; break;
} }
default: default:
err = -ENOTSUPP; err = -ENOTSUPP;
goto out; break;
} }
err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
out:
return err; return err;
} }
static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, static void *t4_sched_entry_lookup(struct port_info *pi,
const unsigned int qid, enum sched_bind_type type,
int *index) const u32 val)
{ {
struct sched_table *s = pi->sched_tbl; struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end; struct sched_class *e, *end;
struct sched_class *found = NULL; void *found = NULL;
int i;
/* Look for a class with matching bound queue parameters */ /* Look for an entry with matching @val */
end = &s->tab[s->sched_size]; end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) { for (e = &s->tab[0]; e != end; ++e) {
if (e->state == SCHED_STATE_UNUSED ||
e->bind_type != type)
continue;
switch (type) {
case SCHED_QUEUE: {
struct sched_queue_entry *qe; struct sched_queue_entry *qe;
i = 0; list_for_each_entry(qe, &e->entry_list, list) {
if (e->state == SCHED_STATE_UNUSED) if (qe->cntxt_id == val) {
continue; found = qe;
break;
}
}
break;
}
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
list_for_each_entry(qe, &e->queue_list, list) { list_for_each_entry(fe, &e->entry_list, list) {
if (qe->cntxt_id == qid) { if (fe->param.tid == val) {
found = e; found = fe;
if (index) break;
*index = i; }
}
break; break;
} }
i++; default:
return NULL;
} }
if (found) if (found)
...@@ -142,35 +166,26 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, ...@@ -142,35 +166,26 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{ {
struct adapter *adap = pi->adapter;
struct sched_class *e;
struct sched_queue_entry *qe = NULL; struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq; struct sge_eth_txq *txq;
unsigned int qid; struct sched_class *e;
int index = -1;
int err = 0; int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets) if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE; return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
qid = txq->q.cntxt_id;
/* Find the existing class that the queue is bound to */ /* Find the existing entry that the queue is bound to */
e = t4_sched_queue_lookup(pi, qid, &index); qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
if (e && index >= 0) { if (qe) {
int i = 0;
list_for_each_entry(qe, &e->queue_list, list) {
if (i == index)
break;
i++;
}
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false); false);
if (err) if (err)
return err; return err;
e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list); list_del(&qe->list);
kvfree(qe); kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) { if (atomic_dec_and_test(&e->refcnt)) {
...@@ -183,11 +198,11 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) ...@@ -183,11 +198,11 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{ {
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl; struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
struct sched_queue_entry *qe = NULL; struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq; struct sge_eth_txq *txq;
struct sched_class *e;
unsigned int qid; unsigned int qid;
int err = 0; int err = 0;
...@@ -215,7 +230,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) ...@@ -215,7 +230,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err) if (err)
goto out_err; goto out_err;
list_add_tail(&qe->list, &e->queue_list); list_add_tail(&qe->list, &e->entry_list);
e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt); atomic_inc(&e->refcnt);
return err; return err;
...@@ -224,6 +240,73 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) ...@@ -224,6 +240,73 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
return err; return err;
} }
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
struct sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
return -ERANGE;
/* Find the existing entry that the flowc is bound to */
fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
if (fe) {
err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
false);
if (err)
return err;
e = &pi->sched_tbl->tab[fe->param.class];
list_del(&fe->list);
kvfree(fe);
if (atomic_dec_and_test(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
}
return err;
}
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
struct sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
return -ERANGE;
fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
if (!fe)
return -ENOMEM;
/* Unbind flowc from any existing class */
err = t4_sched_flowc_unbind(pi, p);
if (err)
goto out_err;
/* Bind flowc to specified class */
memcpy(&fe->param, p, sizeof(fe->param));
e = &s->tab[fe->param.class];
err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
if (err)
goto out_err;
list_add_tail(&fe->list, &e->entry_list);
e->bind_type = SCHED_FLOWC;
atomic_inc(&e->refcnt);
return err;
out_err:
kvfree(fe);
return err;
}
static void t4_sched_class_unbind_all(struct port_info *pi, static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e, struct sched_class *e,
enum sched_bind_type type) enum sched_bind_type type)
...@@ -235,10 +318,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi, ...@@ -235,10 +318,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: { case SCHED_QUEUE: {
struct sched_queue_entry *qe; struct sched_queue_entry *qe;
list_for_each_entry(qe, &e->queue_list, list) list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param); t4_sched_queue_unbind(pi, &qe->param);
break; break;
} }
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
list_for_each_entry(fe, &e->entry_list, list)
t4_sched_flowc_unbind(pi, &fe->param);
break;
}
default: default:
break; break;
} }
...@@ -262,6 +352,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg, ...@@ -262,6 +352,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe); err = t4_sched_queue_unbind(pi, qe);
break; break;
} }
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
if (bind)
err = t4_sched_flowc_bind(pi, fe);
else
err = t4_sched_flowc_unbind(pi, fe);
break;
}
default: default:
err = -ENOTSUPP; err = -ENOTSUPP;
break; break;
...@@ -299,6 +398,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, ...@@ -299,6 +398,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class; class_id = qe->class;
break; break;
} }
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
class_id = fe->class;
break;
}
default: default:
return -ENOTSUPP; return -ENOTSUPP;
} }
...@@ -340,6 +445,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, ...@@ -340,6 +445,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class; class_id = qe->class;
break; break;
} }
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
class_id = fe->class;
break;
}
default: default:
return -ENOTSUPP; return -ENOTSUPP;
} }
...@@ -355,10 +466,13 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, ...@@ -355,10 +466,13 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p) const struct ch_sched_params *p)
{ {
struct sched_table *s = pi->sched_tbl; struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
struct sched_class *found = NULL; struct sched_class *found = NULL;
struct sched_class *e, *end;
if (!p) { /* Only allow tc to be shared among SCHED_FLOWC types. For
* other types, always allocate a new tc.
*/
if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
/* Get any available unused class */ /* Get any available unused class */
end = &s->tab[s->sched_size]; end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) { for (e = &s->tab[0]; e != end; ++e) {
...@@ -467,9 +581,32 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, ...@@ -467,9 +581,32 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p); return t4_sched_class_alloc(pi, p);
} }
static void t4_sched_class_free(struct port_info *pi, struct sched_class *e) /**
* cxgb4_sched_class_free - free a scheduling class
* @dev: net_device pointer
* @e: scheduling class
*
* Frees a scheduling class if there are no users.
*/
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
e = &s->tab[classid];
if (!atomic_read(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
}
static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{ {
t4_sched_class_unbind_all(pi, e, SCHED_QUEUE); struct port_info *pi = netdev2pinfo(dev);
t4_sched_class_unbind_all(pi, e, e->bind_type);
cxgb4_sched_class_free(dev, e->idx);
} }
struct sched_table *t4_init_sched(unsigned int sched_size) struct sched_table *t4_init_sched(unsigned int sched_size)
...@@ -487,7 +624,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size) ...@@ -487,7 +624,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class)); memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i; s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED; s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].queue_list); INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0); atomic_set(&s->tab[i].refcnt, 0);
} }
return s; return s;
...@@ -510,7 +647,7 @@ void t4_cleanup_sched(struct adapter *adap) ...@@ -510,7 +647,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i]; e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE) if (e->state == SCHED_STATE_ACTIVE)
t4_sched_class_free(pi, e); t4_sched_class_free(adap->port[j], e);
} }
kvfree(s); kvfree(s);
} }
......
...@@ -56,6 +56,7 @@ enum sched_fw_ops { ...@@ -56,6 +56,7 @@ enum sched_fw_ops {
enum sched_bind_type { enum sched_bind_type {
SCHED_QUEUE, SCHED_QUEUE,
SCHED_FLOWC,
}; };
struct sched_queue_entry { struct sched_queue_entry {
...@@ -64,11 +65,17 @@ struct sched_queue_entry { ...@@ -64,11 +65,17 @@ struct sched_queue_entry {
struct ch_sched_queue param; struct ch_sched_queue param;
}; };
struct sched_flowc_entry {
struct list_head list;
struct ch_sched_flowc param;
};
struct sched_class { struct sched_class {
u8 state; u8 state;
u8 idx; u8 idx;
struct ch_sched_params info; struct ch_sched_params info;
struct list_head queue_list; enum sched_bind_type bind_type;
struct list_head entry_list;
atomic_t refcnt; atomic_t refcnt;
}; };
...@@ -102,6 +109,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, ...@@ -102,6 +109,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
struct ch_sched_params *p); struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size); struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap); void t4_cleanup_sched(struct adapter *adap);
......
...@@ -1421,6 +1421,11 @@ enum { ...@@ -1421,6 +1421,11 @@ enum {
CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */ CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
}; };
#define CPL_FW4_ACK_FLOWID_S 0
#define CPL_FW4_ACK_FLOWID_M 0xffffff
#define CPL_FW4_ACK_FLOWID_G(x) \
(((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M)
struct cpl_fw6_msg { struct cpl_fw6_msg {
u8 opcode; u8 opcode;
u8 type; u8 type;
......
...@@ -87,6 +87,7 @@ enum fw_wr_opcodes { ...@@ -87,6 +87,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04, FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05, FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08, FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_EO_WR = 0x1c,
FW_OFLD_CONNECTION_WR = 0x2f, FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a, FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b, FW_OFLD_TX_DATA_WR = 0x0b,
...@@ -534,6 +535,35 @@ struct fw_eth_tx_pkt_wr { ...@@ -534,6 +535,35 @@ struct fw_eth_tx_pkt_wr {
__be64 r3; __be64 r3;
}; };
enum fw_eth_tx_eo_type {
FW_ETH_TX_EO_TYPE_TCPSEG = 1,
};
struct fw_eth_tx_eo_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be64 r3;
union fw_eth_tx_eo {
struct fw_eth_tx_eo_tcpseg {
__u8 type;
__u8 ethlen;
__be16 iplen;
__u8 tcplen;
__u8 tsclk_tsoff;
__be16 r4;
__be16 mss;
__be16 r5;
__be32 plen;
} tcpseg;
} u;
};
#define FW_ETH_TX_EO_WR_IMMDLEN_S 0
#define FW_ETH_TX_EO_WR_IMMDLEN_M 0x1ff
#define FW_ETH_TX_EO_WR_IMMDLEN_V(x) ((x) << FW_ETH_TX_EO_WR_IMMDLEN_S)
#define FW_ETH_TX_EO_WR_IMMDLEN_G(x) \
(((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M)
struct fw_ofld_connection_wr { struct fw_ofld_connection_wr {
__be32 op_compl; __be32 op_compl;
__be32 len16_pkd; __be32 len16_pkd;
...@@ -660,6 +690,12 @@ enum fw_flowc_mnem_tcpstate { ...@@ -660,6 +690,12 @@ enum fw_flowc_mnem_tcpstate {
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */ FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
}; };
enum fw_flowc_mnem_eostate {
FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
/* graceful close, after sending outstanding payload */
FW_FLOWC_MNEM_EOSTATE_CLOSING = 2,
};
enum fw_flowc_mnem { enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH, FW_FLOWC_MNEM_CH,
...@@ -1134,6 +1170,7 @@ enum fw_caps_config_nic { ...@@ -1134,6 +1170,7 @@ enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001, FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002, FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
}; };
enum fw_caps_config_ofld { enum fw_caps_config_ofld {
...@@ -1276,6 +1313,7 @@ enum fw_params_param_dev { ...@@ -1276,6 +1313,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28, FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29, FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A, FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E, FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
}; };
......